All articles
SEO · SERP monitoring · residential proxies

Residential Proxies for SEO & SERP Monitoring

JL
James Liu
Lead Engineer @ ProxyLabs
March 15, 2026
8 min read
Share

Search engines personalize results based on location, search history, device type, and dozens of other signals. If you're tracking keyword rankings from your office IP, you're seeing results tailored to your location and browsing profile — not what your target audience sees. A keyword that ranks #3 from your London office might be #8 for a user in Houston and #12 for someone in Sydney.

Residential proxies solve this by letting you check rankings from real IPs in specific cities and countries, seeing exactly what local users see.

Why Datacenter Proxies Fail for SERP Monitoring

Google, Bing, and other search engines treat datacenter IPs differently from residential IPs:

| Factor | Datacenter IP | Residential IP |
| --- | --- | --- |
| CAPTCHA rate on Google | 60-80% after 20-30 queries | Under 5% with proper delays |
| Result accuracy | Often shows "clean" unlocalized results | Matches real user experience |
| Local pack results | Frequently missing or generic | Accurate local business listings |
| IP reputation | Known proxy ranges, flagged quickly | Legitimate ISP ranges |
| Google Maps integration | Often excluded from results | Included like real users |

The data accuracy difference is the critical part. If you're reporting rankings to a client, those numbers need to match what their customers actually see. Datacenter proxies give you search results that no real user would ever see.

For more on the technical differences, see our residential vs datacenter proxies comparison.

Basic SERP Scraping Setup

import requests
from bs4 import BeautifulSoup
import time
import random

# Proxy credentials and gateway host. NOTE: PROXY_BASE embeds the 'http://'
# scheme along with the username; geo-targeting flags (e.g. '-country-US')
# are appended to it at request time.
PROXY_BASE = 'http://your-username'
PROXY_PASS = 'your-password'
PROXY_HOST = 'gate.proxylabs.app:8080'

# Browser-like headers — the default requests User-Agent is an instant bot flag.
HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
}


def get_google_rankings(keyword, country='US', city=None, num_results=20):
    """Scrape Google search results for a keyword from a specific location.

    Args:
        keyword: Search query to check.
        country: ISO country code, used both for proxy geo-targeting and
            Google's ``gl`` parameter.
        city: Optional city name for city-level proxy targeting.
        num_results: Number of organic results to request.

    Returns:
        dict with 'keyword', 'country', 'city' and a 'results' list of
        {position, url, title, snippet} dicts; on HTTP failure, CAPTCHA
        challenge, or network error, a dict with an 'error' key instead.
    """
    # FIX: the scheme used to live inside PROXY_BASE ('http://your-username')
    # while the proxy URL f-string had none — fragile and inconsistent with
    # the other scripts in this article. Strip any scheme from the base and
    # apply 'http://' exactly once, so either form of PROXY_BASE works.
    base = PROXY_BASE[len('http://'):] if PROXY_BASE.startswith('http://') else PROXY_BASE
    username = f'{base}-country-{country}'
    if city:
        username += f'-city-{city}'

    proxy_url = f'http://{username}:{PROXY_PASS}@{PROXY_HOST}'
    proxy = {'http': proxy_url, 'https': proxy_url}

    params = {
        'q': keyword,
        'num': num_results,
        'hl': 'en',
        'gl': country.lower(),  # geolocation hint so Google localizes results
    }

    try:
        r = requests.get(
            'https://www.google.com/search',
            params=params,
            headers=HEADERS,
            proxies=proxy,
            timeout=15,
        )

        if r.status_code != 200:
            return {'error': f'HTTP {r.status_code}', 'keyword': keyword}

        soup = BeautifulSoup(r.text, 'html.parser')

        # Google serves a CAPTCHA interstitial instead of results when it
        # suspects automation; surface it so the caller can retry elsewhere.
        if soup.find('form', {'id': 'captcha-form'}):
            return {'error': 'captcha', 'keyword': keyword}

        results = []
        for i, div in enumerate(soup.select('div.g'), start=1):
            link = div.select_one('a')
            title = div.select_one('h3')
            snippet = div.select_one('.VwiC3b')

            # Skip containers without both a link and a title (ads, widgets).
            if link and title:
                results.append({
                    'position': i,
                    'url': link.get('href', ''),
                    'title': title.get_text(strip=True),
                    'snippet': snippet.get_text(strip=True) if snippet else '',
                })

        return {'keyword': keyword, 'country': country, 'city': city, 'results': results}

    except requests.exceptions.RequestException as e:
        return {'error': str(e), 'keyword': keyword}


# Track rankings for a keyword from different locations
# Track rankings for a keyword from different locations.
# (country code, city) pairs are fed to the proxy's geo-targeting flags.
locations = [
    ('US', 'NewYork'),
    ('US', 'LosAngeles'),
    ('US', 'Chicago'),
    ('GB', 'London'),
    ('DE', 'Berlin'),
]

keyword = 'best project management software'

for country, city in locations:
    data = get_google_rankings(keyword, country=country, city=city)
    if 'error' not in data:
        print(f"\n{city}, {country}:")
        # Print only the top 5 positions per location.
        for r in data['results'][:5]:
            print(f"  #{r['position']}: {r['url'][:60]}")
    else:
        print(f"\n{city}, {country}: Error — {data['error']}")

    # Randomized inter-query delay — fixed intervals are a bot signal.
    time.sleep(random.uniform(3, 7))

Tracking Your Rankings Over Time

The real value comes from daily tracking. Here's a more complete rank tracker:

import requests
from bs4 import BeautifulSoup
import json
import time
import random
from datetime import datetime

# Proxy credentials and gateway host; geo-targeting flags are appended to
# the username at request time (unlike the first script, no scheme here).
PROXY_USER = 'your-username'
PROXY_PASS = 'your-password'
PROXY_HOST = 'gate.proxylabs.app:8080'

# Browser-like headers — the default requests User-Agent is an instant bot flag.
HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.9',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
}


def find_domain_rank(keyword, target_domain, country='US', city=None, max_results=50):
    """Find where a specific domain ranks for a keyword.

    Args:
        keyword: Search query.
        target_domain: Domain to look for (substring-matched against result
            URLs — 'example.com' would also match 'notexample.com'; fine for
            a simple tracker, but worth knowing).
        country: ISO country code for proxy geo-targeting and Google's gl.
        city: Optional city-level proxy targeting.
        max_results: How many organic results to fetch (Google caps at 100).

    Returns:
        1-based position of the first matching result, -1 if the domain is
        absent from the fetched results, or None on HTTP error, CAPTCHA, or
        network failure.
    """
    username = f'{PROXY_USER}-country-{country}'
    if city:
        username += f'-city-{city}'

    proxy_url = f'http://{username}:{PROXY_PASS}@{PROXY_HOST}'
    proxy = {'http': proxy_url, 'https': proxy_url}

    # Fetch deeper result pages to find lower rankings; 'num' is capped at 100.
    params = {'q': keyword, 'num': min(max_results, 100), 'hl': 'en', 'gl': country.lower()}

    try:
        r = requests.get('https://www.google.com/search', params=params,
                         headers=HEADERS, proxies=proxy, timeout=15)

        if r.status_code != 200:
            return None

        soup = BeautifulSoup(r.text, 'html.parser')
        if soup.find('form', {'id': 'captcha-form'}):
            return None

        for i, div in enumerate(soup.select('div.g'), start=1):
            link = div.select_one('a')
            if link and target_domain in link.get('href', ''):
                return i

        return -1  # Not found in results

    # FIX: was a bare `except Exception`, which silently swallowed
    # programming errors (typos, attribute errors) along with network
    # faults. Catch only request-level failures, matching the error
    # handling used by get_google_rankings.
    except requests.exceptions.RequestException:
        return None


def daily_rank_check(keywords, target_domain, locations):
    """Run a full rank check across keywords and locations.

    For every (keyword, location) combination, looks up where
    *target_domain* ranks, prints a progress line, and appends one JSON
    record per check to a dated JSONL file for historical tracking.

    Returns the list of record dicts written for this run.
    """
    today = datetime.now().strftime('%Y-%m-%d')
    entries = []

    for kw in keywords:
        for country, city in locations:
            position = find_domain_rank(kw, target_domain, country, city)
            record = {
                'date': today,
                'keyword': kw,
                'country': country,
                'city': city,
                'rank': position,
                'domain': target_domain,
            }
            entries.append(record)

            # position > 0: found at that rank; -1: absent; None: fetch error.
            if position and position > 0:
                status = f"#{position}"
            elif position == -1:
                status = "Not found"
            else:
                status = "Error"
            print(f"  {kw} ({city}, {country}): {status}")

            # Delay between queries — Google rate limits aggressively
            time.sleep(random.uniform(4, 8))

    # Append to JSONL file for historical tracking
    with open(f'rankings_{today}.jsonl', 'a') as f:
        f.writelines(json.dumps(record) + '\n' for record in entries)

    return entries


# Configuration
# Configuration
# Keywords whose rankings we check on each run.
keywords = [
    'residential proxies',
    'rotating proxy service',
    'web scraping proxy',
    'buy residential proxies',
]

# Domain to locate in the results (substring-matched against result URLs).
target_domain = 'proxylabs.app'

# (country code, city) pairs for proxy geo-targeting.
locations = [
    ('US', 'NewYork'),
    ('US', 'SanFrancisco'),
    ('GB', 'London'),
]

# Runs len(keywords) x len(locations) queries with 4-8 s delays in between.
daily_rank_check(keywords, target_domain, locations)

Google SERP Features to Track

Modern SERPs aren't just 10 blue links. You need to track these features too:

| Feature | CSS selector (approximate) | Why it matters |
| --- | --- | --- |
| Featured snippet | div.xpdopen | Position 0 — massive CTR |
| People Also Ask | div.related-question-pair | Expanding into this drives traffic |
| Local pack (map) | div.VkpGBb | Critical for local businesses |
| Knowledge panel | div.kp-wholepage | Brand queries |
| Shopping results | div.commercial-unit-desktop-top | E-commerce keywords |
| Video carousel | div.MjjYud video-voyager | Video SEO opportunities |
def detect_serp_features(soup):
    """Detect which SERP features appear for a query.

    Takes a BeautifulSoup-parsed results page and returns a dict mapping
    feature name -> bool (whether the feature is present on the page).
    """
    single_selectors = {
        'featured_snippet': 'div.xpdopen',
        'people_also_ask': 'div.related-question-pair',
        'local_pack': 'div.VkpGBb',
        'knowledge_panel': 'div.kp-wholepage',
        'shopping': 'div.commercial-unit-desktop-top',
    }
    features = {name: soup.select_one(css) is not None
                for name, css in single_selectors.items()}
    # Video carousel: selector matches multiple nodes, so use select().
    features['video_carousel'] = bool(soup.select('div[data-init-vis="true"] video-voyager'))
    return features

Avoiding Google Detection

Google is more aggressive about blocking scrapers than most sites. These patterns keep success rates above 90%:

Request Timing

  • Minimum 3 seconds between queries from the same IP. Below this, CAPTCHA rates spike dramatically.
  • Randomize delays: random.uniform(3, 8) is better than a fixed 5 seconds. Fixed intervals are a bot signal.
  • Rotate IPs every 5-10 queries: Don't use rotating mode (new IP per request). Instead, use a sticky session for a batch of 5-10 queries, then switch to a new session. This mimics a real user doing several searches in a sitting.
import uuid

def batch_search(keywords, country='US', batch_size=7):
    """Search in batches, rotating the proxy IP between batches.

    Each batch shares one sticky session (one exit IP), then a fresh
    session id forces a new IP — mimicking a real user who runs several
    searches in a single sitting.

    Returns a list of {'keyword', 'status', 'size'} dicts, one per query.
    """
    outcomes = []

    for start in range(0, len(keywords), batch_size):
        chunk = keywords[start:start + batch_size]
        session_id = uuid.uuid4().hex[:12]

        endpoint = (
            f'http://{PROXY_USER}-country-{country}-session-{session_id}'
            f':{PROXY_PASS}@{PROXY_HOST}'
        )
        proxy = {'http': endpoint, 'https': endpoint}

        for kw in chunk:
            response = requests.get(
                'https://www.google.com/search',
                params={'q': kw, 'num': 20, 'hl': 'en'},
                headers=HEADERS,
                proxies=proxy,
                timeout=15,
            )
            outcomes.append({
                'keyword': kw,
                'status': response.status_code,
                'size': len(response.text),
            })
            # Randomized inter-query delay within the batch.
            time.sleep(random.uniform(4, 8))

        # Longer pause between batches
        time.sleep(random.uniform(10, 20))

    return outcomes

Accept-Language Matching

If you're using a German IP but sending Accept-Language: en-US, Google notices the mismatch. Match the language header to your proxy country:

# Accept-Language header values keyed by proxy country code. Sending a
# language that matches the exit IP's country removes an easy
# header/geolocation mismatch signal.
LANGUAGE_MAP = {
    'US': 'en-US,en;q=0.9',
    'GB': 'en-GB,en;q=0.9',
    'DE': 'de-DE,de;q=0.9,en;q=0.8',
    'FR': 'fr-FR,fr;q=0.9,en;q=0.8',
    'JP': 'ja-JP,ja;q=0.9,en;q=0.8',
    'BR': 'pt-BR,pt;q=0.9,en;q=0.8',
}

def get_headers_for_country(country):
    """Return a copy of HEADERS whose Accept-Language matches *country*.

    Falls back to US English for countries not present in LANGUAGE_MAP.
    """
    language = LANGUAGE_MAP.get(country, 'en-US,en;q=0.9')
    return {**HEADERS, 'Accept-Language': language}

Bandwidth Estimation

SERP monitoring is bandwidth-efficient compared to full-page scraping:

| Scale | Queries/day | Bandwidth/day | Monthly cost (£2.50/GB) |
| --- | --- | --- | --- |
| Small (1 site, 50 keywords, 3 locations) | 150 | ~75 MB | ~£5.60/mo |
| Medium (5 sites, 200 keywords, 5 locations) | 5,000 | ~2.5 GB | ~£187/mo |
| Large (20 sites, 1000 keywords, 10 locations) | 200,000 | ~100 GB | ~£250/mo (100GB tier) |

A Google SERP page averages ~500KB. With ProxyLabs' 100GB tier at £2.50/GB, large-scale SERP monitoring is cost-effective — especially compared to commercial rank tracking tools that charge $100-500/month for similar volumes.

Multi-Engine Monitoring

Google isn't the only search engine. For comprehensive SEO monitoring, track Bing and regional engines too:

# Per-engine request configuration:
#   url             — the engine's search endpoint
#   params          — builder (keyword, result count) -> query-string dict
#   result_selector — CSS selector matching one organic result container
SEARCH_ENGINES = {
    'google': {
        'url': 'https://www.google.com/search',
        'params': lambda kw, n: {'q': kw, 'num': n, 'hl': 'en'},
        'result_selector': 'div.g',
    },
    'bing': {
        'url': 'https://www.bing.com/search',
        'params': lambda kw, n: {'q': kw, 'count': n},
        'result_selector': 'li.b_algo',
    },
}


def multi_engine_rank(keyword, target_domain, engines=None, country='US'):
    """Check rankings across multiple search engines.

    Args:
        keyword: Search query.
        target_domain: Domain to look for (substring-matched in result URLs).
        engines: Keys into SEARCH_ENGINES; defaults to ['google', 'bing'].
        country: ISO country code for proxy geo-targeting.

    Returns:
        dict mapping engine name -> 1-based rank of the first matching
        result, -1 when the domain is absent from the first 20 results,
        or None on network failure.
    """
    if engines is None:
        engines = ['google', 'bing']

    proxy_url = f'http://{PROXY_USER}-country-{country}:{PROXY_PASS}@{PROXY_HOST}'
    proxy = {'http': proxy_url, 'https': proxy_url}

    rankings = {}
    for engine_name in engines:
        engine = SEARCH_ENGINES[engine_name]
        try:
            r = requests.get(
                engine['url'],
                params=engine['params'](keyword, 20),
                headers=HEADERS,
                proxies=proxy,
                timeout=15,
            )
            soup = BeautifulSoup(r.text, 'html.parser')
            results = soup.select(engine['result_selector'])

            rank = -1  # Sentinel: not found among fetched results
            for i, result in enumerate(results, start=1):
                link = result.select_one('a')
                if link and target_domain in link.get('href', ''):
                    rank = i
                    break

            rankings[engine_name] = rank
        # FIX: was `except Exception as e` with `e` unused — an overly
        # broad catch that also hid programming errors. Catch only
        # request-level failures, consistent with the other fetch helpers.
        except requests.exceptions.RequestException:
            rankings[engine_name] = None

        # Pause between engines to stay under per-engine rate limits.
        time.sleep(random.uniform(3, 6))

    return rankings

For more on scraping strategies and avoiding blocks, see our scraping without getting blocked guide and general scraping use case page.

Ready to try the fastest residential proxies?

Join developers and businesses who trust ProxyLabs for mission-critical proxy infrastructure.

~200ms response · Best anti-bot bypass · £2.50/GB
Start Building Now — No subscription required
SEO · SERP monitoring · residential proxies · rank tracking · python · web scraping
JL
James Liu
Lead Engineer @ ProxyLabs

Building proxy infrastructure since 2019. Previously failed at many things, now failing slightly less.

Found this helpful? Share it with others.

Share