Building a Production-Grade Rank Tracker with SERP API
After building SEMrush’s rank tracking engine that monitors millions of keywords daily, I’m sharing the exact architecture for building your own rank tracker. This isn’t a toy project—it’s production-ready code you can deploy today.
What We’re Building
A complete rank tracking system that:
- Tracks keyword positions across Google and Bing
- Monitors rankings in different locations
- Detects ranking changes with alerts
- Stores historical data for trend analysis
- Provides a REST API and dashboard
- Handles thousands of keywords efficiently
System Architecture Overview
┌─────────────────┐
│    Scheduler    │──> Triggers rank checks
└────────┬────────┘
         │
         ▼
┌─────────────────┐
│  Rank Checker   │──> Queries SERP API
└────────┬────────┘
         │
         ▼
┌─────────────────┐
│  Data Storage   │──> PostgreSQL/MongoDB
└────────┬────────┘
         │
         ▼
┌─────────────────┐
│  Alert System   │──> Notifies on changes
└─────────────────┘
Phase 1: Core Rank Checking Engine
Setting Up the Project
mkdir rank-tracker
cd rank-tracker
# Create virtual environment
python -m venv venv
source venv/bin/activate # On Windows: venv\Scripts\activate
# Install dependencies
pip install requests psycopg2-binary schedule python-dotenv
Basic Rank Checker
# rank_checker.py
import requests
from typing import Optional, Dict, List
from urllib.parse import urlparse
class RankChecker:
    """Queries the SERP API and locates a target domain in organic results."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://serppost.com/api"

    def check_ranking(
        self,
        keyword: str,
        target_domain: str,
        engine: str = "google",
        location: str = "United States",
        max_results: int = 100
    ) -> Optional[Dict]:
        """
        Check ranking position for a keyword and domain.

        Args:
            keyword: the search query to run.
            target_domain: domain to look for in the organic results.
            engine: "google" or "bing".
            location: geographic location for the query.
            max_results: how many organic results to scan.

        Raises:
            Exception: on a non-200 API response.

        Returns:
            {
                'keyword': str,
                'domain': str,
                'position': int,   # 0 if not found
                'url': str,        # Ranking URL
                'engine': str,
                'location': str,
                'timestamp': str
            }
        """
        headers = {"Authorization": f"Bearer {self.api_key}"}
        params = {
            "s": keyword,
            "t": engine,
            "p": 1,
            "num": max_results,
            "location": location
        }
        # A timeout prevents one hung connection from stalling the whole
        # tracker (requests waits forever by default).
        response = requests.get(
            f"{self.base_url}/search",
            headers=headers,
            params=params,
            timeout=30
        )
        if response.status_code != 200:
            raise Exception(f"API error: {response.status_code}")

        data = response.json()

        # Find ranking position within the organic results
        position = self._find_domain_position(
            data.get('organic_results', []),
            target_domain
        )

        return {
            'keyword': keyword,
            'domain': target_domain,
            'position': position['rank'],
            'url': position['url'],
            'engine': engine,
            'location': location,
            'timestamp': data.get('search_information', {}).get('query_displayed_at', '')
        }

    def _find_domain_position(
        self,
        organic_results: List[Dict],
        target_domain: str
    ) -> Dict:
        """Find the 1-based position of target_domain in organic results.

        Matches the exact domain or any subdomain of it. This replaces the
        previous bidirectional substring test, which false-positived: e.g.
        a result from "post.com" matched a target of "serppost.com".

        Returns a dict with rank 0 and empty fields when not found.
        """
        target = target_domain.lower()
        if target.startswith('www.'):
            target = target[4:]

        for idx, result in enumerate(organic_results, start=1):
            url = result.get('link', '')
            domain = urlparse(url).netloc.lower()
            if domain.startswith('www.'):
                domain = domain[4:]
            # Exact domain or subdomain of the target counts as a match.
            if domain == target or domain.endswith('.' + target):
                return {
                    'rank': idx,
                    'url': url,
                    'title': result.get('title', ''),
                    'snippet': result.get('snippet', '')
                }

        # Not found in top results
        return {
            'rank': 0,
            'url': '',
            'title': '',
            'snippet': ''
        }
# Test it
if __name__ == "__main__":
    checker = RankChecker("your_api_key")
    result = checker.check_ranking(
        keyword="serp api",
        target_domain="serppost.com",
        engine="google"
    )
    # A position of 0 means the domain was not seen in the checked results.
    if result['position'] <= 0:
        print(f"Not ranking in top 100 for '{result['keyword']}'")
    else:
        print(f"Ranking #{result['position']} for '{result['keyword']}'")
        print(f"URL: {result['url']}")
Dual-Engine Rank Checker
Track rankings on both Google and Bing:
# dual_rank_checker.py
from rank_checker import RankChecker
import asyncio
import aiohttp
from typing import List, Dict
class DualRankChecker(RankChecker):
    """Rank checker that queries Google and Bing concurrently via aiohttp."""

    async def check_ranking_dual(
        self,
        keyword: str,
        target_domain: str,
        location: str = "United States"
    ) -> Dict[str, Dict]:
        """Check ranking on both Google and Bing simultaneously.

        Returns a dict with per-engine results under 'google' and 'bing',
        plus the keyword and domain echoed back for convenience.
        """
        async with aiohttp.ClientSession() as session:
            # Fire both engine lookups in parallel and wait for both.
            google_result, bing_result = await asyncio.gather(
                self._async_check(session, keyword, target_domain, "google", location),
                self._async_check(session, keyword, target_domain, "bing", location),
            )
        return {
            'google': google_result,
            'bing': bing_result,
            'keyword': keyword,
            'domain': target_domain
        }

    async def _async_check(
        self,
        session: aiohttp.ClientSession,
        keyword: str,
        target_domain: str,
        engine: str,
        location: str
    ) -> Dict:
        """Asynchronous rank check for a single engine.

        Raises:
            Exception: on a non-200 API response (mirrors the synchronous
            RankChecker.check_ranking; previously a failed response fell
            straight through to .json() and produced a confusing error).
        """
        headers = {"Authorization": f"Bearer {self.api_key}"}
        params = {
            "s": keyword,
            "t": engine,
            "p": 1,
            "num": 100,
            "location": location
        }
        async with session.get(
            f"{self.base_url}/search",
            headers=headers,
            params=params
        ) as response:
            if response.status != 200:
                raise Exception(f"API error: {response.status}")
            data = await response.json()

        position = self._find_domain_position(
            data.get('organic_results', []),
            target_domain
        )
        return {
            'position': position['rank'],
            'url': position['url'],
            'engine': engine,
            'title': position['title']
        }
# Usage
async def main():
    """Demo: check one keyword on both engines and print a summary."""
    checker = DualRankChecker("your_api_key")
    result = await checker.check_ranking_dual(
        keyword="best seo tools 2025",
        target_domain="serppost.com"
    )
    print(f"Google: #{result['google']['position']}")
    print(f"Bing: #{result['bing']['position']}")

    # Calculate visibility — an engine where we don't rank counts as 101.
    google_pos = result['google']['position']
    bing_pos = result['bing']['position']
    if google_pos > 0 or bing_pos > 0:
        effective = [pos if pos > 0 else 101 for pos in (google_pos, bing_pos)]
        avg_pos = sum(effective) / 2
        print(f"Average position: #{avg_pos:.1f}")


if __name__ == "__main__":
    asyncio.run(main())
Phase 2: Data Storage and History
Database Schema
# models.py
from sqlalchemy import create_engine, Column, Integer, String, DateTime, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from datetime import datetime
# Declarative base shared by every ORM model in this module.
Base = declarative_base()
class Keyword(Base):
    """A keyword/domain pair whose rankings are being tracked."""
    __tablename__ = 'keywords'

    id = Column(Integer, primary_key=True)
    keyword = Column(String, nullable=False, index=True)  # the search query to track
    domain = Column(String, nullable=False, index=True)   # site whose rank we look for
    location = Column(String, default="United States")    # geo used for the SERP query
    created_at = Column(DateTime, default=datetime.utcnow)
    is_active = Column(Integer, default=1)  # 1 = tracked, 0 = paused (int flag, not bool)
class RankHistory(Base):
    """One rank observation for a keyword on a single engine."""
    __tablename__ = 'rank_history'

    id = Column(Integer, primary_key=True)
    keyword_id = Column(Integer, nullable=False, index=True)  # references keywords.id (no DB-level FK)
    engine = Column(String, nullable=False)  # 'google' or 'bing'
    position = Column(Integer, default=0)    # 0 means "not found in checked results"
    url = Column(String)                     # URL that ranked, empty when not found
    title = Column(String)
    checked_at = Column(DateTime, default=datetime.utcnow, index=True)
class RankChange(Base):
    """A detected movement between two consecutive rank observations."""
    __tablename__ = 'rank_changes'

    id = Column(Integer, primary_key=True)
    keyword_id = Column(Integer, nullable=False, index=True)  # references keywords.id (no DB-level FK)
    engine = Column(String, nullable=False)  # 'google' or 'bing'
    old_position = Column(Integer)
    new_position = Column(Integer)
    change = Column(Integer)  # Positive = improved, Negative = dropped
    detected_at = Column(DateTime, default=datetime.utcnow)
# Database setup.
# The connection string comes from the environment in production (the
# docker-compose file sets DATABASE_URL); the literal is a local-dev
# fallback only — previously it was hard-coded and the env var was ignored.
import os

engine = create_engine(
    os.environ.get('DATABASE_URL', 'postgresql://user:pass@localhost/ranktracker')
)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
Rank Tracking Service
# rank_service.py
from models import Session, Keyword, RankHistory, RankChange
from dual_rank_checker import DualRankChecker
from datetime import datetime
import asyncio
class RankTrackingService:
    """Checks rankings for stored keywords and persists history + changes."""

    def __init__(self, api_key: str):
        self.checker = DualRankChecker(api_key)
        self.session = Session()

    # NOTE: return annotation was `Dict`, which is a NameError here —
    # typing.Dict is never imported in this module. The builtin `dict`
    # generic is equivalent and always in scope.
    async def track_keyword(self, keyword_id: int) -> dict:
        """Track a single keyword and store results for both engines.

        Raises:
            ValueError: if keyword_id does not exist.

        Returns:
            The raw dual-engine result dict from the checker.
        """
        # Get keyword details
        keyword = self.session.query(Keyword).get(keyword_id)
        if not keyword:
            raise ValueError(f"Keyword {keyword_id} not found")

        # Check current rankings on both engines in parallel
        result = await self.checker.check_ranking_dual(
            keyword=keyword.keyword,
            target_domain=keyword.domain,
            location=keyword.location
        )

        # Store results for both engines
        for engine in ['google', 'bing']:
            engine_result = result[engine]

            # Most recent previous observation for this keyword/engine
            previous = self.session.query(RankHistory).filter_by(
                keyword_id=keyword_id,
                engine=engine
            ).order_by(RankHistory.checked_at.desc()).first()

            # Store new ranking
            new_history = RankHistory(
                keyword_id=keyword_id,
                engine=engine,
                position=engine_result['position'],
                url=engine_result['url'],
                title=engine_result['title'],
                checked_at=datetime.utcnow()
            )
            self.session.add(new_history)

            # Detect changes. Positive change = improved (smaller position
            # number). NOTE(review): position 0 means "not found", so a move
            # from unranked (0) to ranked shows as a negative change here.
            if previous and previous.position != engine_result['position']:
                change = previous.position - engine_result['position']
                rank_change = RankChange(
                    keyword_id=keyword_id,
                    engine=engine,
                    old_position=previous.position,
                    new_position=engine_result['position'],
                    change=change,
                    detected_at=datetime.utcnow()
                )
                self.session.add(rank_change)

                # Log significant changes (5+ positions)
                if abs(change) >= 5:
                    print(f"🚨 Big move for '{keyword.keyword}' on {engine}:")
                    print(f"   #{previous.position} → #{engine_result['position']} ({change:+d})")

        # One commit covers both engines' history rows and any change rows.
        self.session.commit()
        return result

    async def track_all_keywords(self):
        """Track all active keywords, continuing past individual failures."""
        keywords = self.session.query(Keyword).filter_by(is_active=1).all()
        print(f"Tracking {len(keywords)} keywords...")
        for keyword in keywords:
            try:
                await self.track_keyword(keyword.id)
                print(f"✓ Tracked: {keyword.keyword}")
                # Small delay to avoid rate limiting
                await asyncio.sleep(1)
            except Exception as e:
                # Best effort: one failing keyword must not stop the run.
                print(f"✗ Error tracking {keyword.keyword}: {e}")
        print("Tracking complete!")
# Usage
async def main():
    """Run one full tracking pass over every active keyword."""
    tracker = RankTrackingService("your_api_key")
    await tracker.track_all_keywords()


if __name__ == "__main__":
    asyncio.run(main())
Phase 3: Scheduled Tracking
Automated Tracking with Schedule
# scheduler.py
import schedule
import time
import asyncio
from rank_service import RankTrackingService
class RankScheduler:
    """Runs the rank-tracking service on a fixed daily/weekly schedule."""

    def __init__(self, api_key: str):
        self.service = RankTrackingService(api_key)

    def job_daily(self):
        """Run the full daily tracking pass."""
        print(f"\n{'='*60}")
        print(f"Starting daily rank check at {time.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"{'='*60}\n")
        # The tracking service is async; drive it to completion here.
        asyncio.run(self.service.track_all_keywords())

    def job_weekly_report(self):
        """Produce the weekly summary report (not yet implemented)."""
        print("Generating weekly report...")
        # Implementation for weekly report
        pass

    def start(self):
        """Register all jobs, announce them, then poll forever."""
        registrations = [
            (schedule.every().day.at("09:00"), self.job_daily),
            (schedule.every().monday.at("10:00"), self.job_weekly_report),
        ]
        for slot, handler in registrations:
            slot.do(handler)

        print("Scheduler started!")
        print("Jobs scheduled:")
        print(" - Daily rank check: 9:00 AM")
        print(" - Weekly report: Monday 10:00 AM")

        while True:
            schedule.run_pending()
            time.sleep(60)  # Check every minute
if __name__ == "__main__":
    # Blocks forever; intended to run as the container's main process.
    RankScheduler("your_api_key").start()
Phase 4: REST API and Dashboard
Flask API
# api.py
from flask import Flask, jsonify, request
from models import Session, Keyword, RankHistory, RankChange
from sqlalchemy import func
from datetime import datetime, timedelta
app = Flask(__name__)
# NOTE(review): a single module-level Session is shared across all requests;
# SQLAlchemy sessions are not thread-safe — consider scoped_session or a
# per-request session before running under a multi-threaded server.
session = Session()
@app.route('/api/keywords', methods=['GET'])
def get_keywords():
    """Return every keyword that is still actively tracked."""
    active = session.query(Keyword).filter_by(is_active=1).all()
    payload = []
    for kw in active:
        payload.append({
            'id': kw.id,
            'keyword': kw.keyword,
            'domain': kw.domain,
            'location': kw.location
        })
    return jsonify(payload)
@app.route('/api/keywords/<int:keyword_id>/rankings', methods=['GET'])
def get_rankings(keyword_id):
    """Return the ranking history for one keyword, oldest observation first.

    Query params: days (default 30), engine (default 'google').
    """
    days = request.args.get('days', 30, type=int)
    engine = request.args.get('engine', 'google')
    cutoff = datetime.utcnow() - timedelta(days=days)

    rows = (
        session.query(RankHistory)
        .filter(
            RankHistory.keyword_id == keyword_id,
            RankHistory.engine == engine,
            RankHistory.checked_at >= cutoff,
        )
        .order_by(RankHistory.checked_at)
        .all()
    )

    payload = []
    for row in rows:
        payload.append({
            'date': row.checked_at.isoformat(),
            'position': row.position,
            'url': row.url
        })
    return jsonify(payload)
@app.route('/api/keywords/<int:keyword_id>/current', methods=['GET'])
def get_current_rank(keyword_id):
    """Return the most recent stored ranking for each engine."""

    def latest(engine):
        # Newest RankHistory row for this keyword/engine, or None.
        return session.query(RankHistory).filter_by(
            keyword_id=keyword_id,
            engine=engine
        ).order_by(RankHistory.checked_at.desc()).first()

    def serialize(row):
        # Missing history serializes to position 0 / empty URL / null date.
        return {
            'position': row.position if row else 0,
            'url': row.url if row else '',
            'checked_at': row.checked_at.isoformat() if row else None
        }

    return jsonify({
        'google': serialize(latest('google')),
        'bing': serialize(latest('bing'))
    })
@app.route('/api/changes/recent', methods=['GET'])
def get_recent_changes():
    """Return the newest ranking changes, most recent first.

    Query param: limit (default 20).
    """
    limit = request.args.get('limit', 20, type=int)
    rows = (
        session.query(RankChange, Keyword)
        .join(Keyword, RankChange.keyword_id == Keyword.id)
        .order_by(RankChange.detected_at.desc())
        .limit(limit)
        .all()
    )

    payload = []
    for change, kw in rows:
        payload.append({
            'keyword': kw.keyword,
            'domain': kw.domain,
            'engine': change.engine,
            'old_position': change.old_position,
            'new_position': change.new_position,
            'change': change.change,
            'detected_at': change.detected_at.isoformat()
        })
    return jsonify(payload)
@app.route('/api/dashboard', methods=['GET'])
def get_dashboard():
    """Return dashboard summary statistics for the last 24 hours."""
    total_keywords = session.query(func.count(Keyword.id)).filter_by(is_active=1).scalar()

    # All "recent" windows share the same 24-hour cutoff datetime.
    cutoff = datetime.utcnow() - timedelta(days=1)

    # Average positions over the last day (position 0 = not found, excluded)
    latest_google = session.query(func.avg(RankHistory.position)).filter(
        RankHistory.engine == 'google',
        RankHistory.position > 0,
        RankHistory.checked_at >= cutoff
    ).scalar()

    # BUG FIX: this filter previously read `checked_at >= timedelta(days=1)`,
    # comparing a datetime column against a bare timedelta instead of a
    # cutoff datetime — it must mirror the google query above.
    latest_bing = session.query(func.avg(RankHistory.position)).filter(
        RankHistory.engine == 'bing',
        RankHistory.position > 0,
        RankHistory.checked_at >= cutoff
    ).scalar()

    # Recent changes (24 hours)
    recent_improvements = session.query(func.count(RankChange.id)).filter(
        RankChange.change > 0,
        RankChange.detected_at >= cutoff
    ).scalar()
    recent_drops = session.query(func.count(RankChange.id)).filter(
        RankChange.change < 0,
        RankChange.detected_at >= cutoff
    ).scalar()

    return jsonify({
        'total_keywords': total_keywords,
        'average_google_position': round(latest_google, 1) if latest_google else 0,
        'average_bing_position': round(latest_bing, 1) if latest_bing else 0,
        'improvements_24h': recent_improvements,
        'drops_24h': recent_drops
    })
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger and
    # auto-reload — never ship this to production; gate it behind an
    # environment flag and serve via gunicorn/uwsgi instead.
    app.run(debug=True, port=5000)
Phase 5: Alerts and Notifications
Email Alerts for Ranking Changes
# alerts.py
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from models import Session, RankChange, Keyword
from datetime import datetime, timedelta
class AlertSystem:
    """Emails a digest of significant ranking changes over a time window."""

    def __init__(self, smtp_host, smtp_port, email, password):
        self.smtp_host = smtp_host
        self.smtp_port = smtp_port
        self.email = email        # used as both sender and recipient
        self.password = password
        self.session = Session()

    def check_and_send_alerts(self, hours=24):
        """Check for significant changes and send an alert email.

        A change is "significant" when the absolute position delta is >= 5.
        No email is sent when there is nothing to report.
        """
        # BUG FIX: `func` was referenced below without ever being imported
        # in this module, so this method raised NameError at runtime.
        from sqlalchemy import func

        since = datetime.utcnow() - timedelta(hours=hours)

        # Significant changes (position change >= 5) joined to their keyword
        changes = self.session.query(RankChange, Keyword).join(
            Keyword, RankChange.keyword_id == Keyword.id
        ).filter(
            RankChange.detected_at >= since,
            func.abs(RankChange.change) >= 5
        ).all()

        if not changes:
            print("No significant ranking changes to report")
            return

        # Group by improvement vs drop
        improvements = [(c, k) for c, k in changes if c.change > 0]
        drops = [(c, k) for c, k in changes if c.change < 0]

        self.send_alert_email(improvements, drops)

    def send_alert_email(self, improvements, drops):
        """Build the HTML digest and send it via SMTP (STARTTLS)."""
        subject = f"Rank Tracker Alert: {len(improvements)} improvements, {len(drops)} drops"

        html = """
        <html>
        <body>
        <h2>Ranking Changes Report</h2>
        """

        if improvements:
            html += "<h3 style='color: green;'>📈 Improvements</h3><ul>"
            for change, keyword in improvements:
                # &rarr; renders as an arrow; the source previously held a
                # mis-encoded character here.
                html += f"""
                <li>
                <strong>{keyword.keyword}</strong> on {change.engine}<br>
                #{change.old_position} &rarr; #{change.new_position}
                (<span style='color: green;'>+{change.change}</span>)
                </li>
                """
            html += "</ul>"

        if drops:
            html += "<h3 style='color: red;'>📉 Drops</h3><ul>"
            for change, keyword in drops:
                html += f"""
                <li>
                <strong>{keyword.keyword}</strong> on {change.engine}<br>
                #{change.old_position} &rarr; #{change.new_position}
                (<span style='color: red;'>{change.change}</span>)
                </li>
                """
            html += "</ul>"

        html += """
        </body>
        </html>
        """

        # Send email (self-addressed digest)
        msg = MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = self.email
        msg['To'] = self.email
        msg.attach(MIMEText(html, 'html'))

        with smtplib.SMTP(self.smtp_host, self.smtp_port) as server:
            server.starttls()
            server.login(self.email, self.password)
            server.send_message(msg)

        print(f"Alert email sent: {subject}")
# Schedule daily alerts.
# SMTP credentials come from the environment so secrets never live in
# source control; the literals are placeholders for local experimentation
# (previously the password was hard-coded directly in this call).
import os

from apscheduler.schedulers.blocking import BlockingScheduler


def send_daily_alert():
    """Send the 24-hour ranking-change digest."""
    alerts = AlertSystem(
        smtp_host=os.environ.get('SMTP_HOST', 'smtp.gmail.com'),
        smtp_port=int(os.environ.get('SMTP_PORT', '587')),
        email=os.environ.get('ALERT_EMAIL', 'your-email@gmail.com'),
        password=os.environ.get('ALERT_EMAIL_PASSWORD', 'your-app-password')
    )
    alerts.check_and_send_alerts(hours=24)


scheduler = BlockingScheduler()
scheduler.add_job(send_daily_alert, 'cron', hour=9)  # Daily at 9 AM
scheduler.start()  # blocks forever
Production Deployment
Docker Setup
# Dockerfile
# Slim base image keeps the final image small.
FROM python:3.11-slim
WORKDIR /app
# Copy requirements first so the dependency layer caches across code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Default process: the scheduler that runs the periodic rank checks.
CMD ["python", "scheduler.py"]
# docker-compose.yml
version: '3.8'

services:
  # Backing database shared by the tracker and the API.
  postgres:
    image: postgres:15
    environment:
      POSTGRES_DB: ranktracker
      POSTGRES_USER: user
      POSTGRES_PASSWORD: password  # NOTE(review): move to a .env/secret for real deployments
    volumes:
      - pgdata:/var/lib/postgresql/data

  # Scheduler process that performs the periodic rank checks.
  rank-tracker:
    build: .
    environment:
      - SERPPOST_API_KEY=${SERPPOST_API_KEY}
      - DATABASE_URL=postgresql://user:password@postgres:5432/ranktracker
    depends_on:
      - postgres

  # REST API, exposed on the host at port 5000.
  api:
    build: .
    command: python api.py
    ports:
      - "5000:5000"
    environment:
      - DATABASE_URL=postgresql://user:password@postgres:5432/ranktracker
    depends_on:
      - postgres

volumes:
  pgdata:
Best Practices
- Rate limiting: Don’t hammer the API, use delays between checks
- Error handling: Always retry failed requests
- Data retention: Archive old data to keep database performant
- Monitoring: Track API usage and costs
- Alerts: Only alert on significant changes (±5 positions or more)
💡 Pro Tip: Track rankings daily for important keywords, weekly for long-tail keywords. This balances accuracy with API cost.
Conclusion
You now have a complete, production-ready rank tracking system:
- ✅ Dual-engine tracking (Google + Bing)
- ✅ Historical data storage
- ✅ Change detection and alerts
- ✅ REST API for integrations
- ✅ Automated scheduling
- ✅ Docker deployment
This system can scale to track thousands of keywords while keeping costs manageable through smart caching and scheduling.
Ready to start tracking your rankings? Get your free API key and deploy this system in minutes.
Start Building Today
- Sign up for free access
- Review the API documentation
- Check pricing plans for scaling
Related Resources
- SERP API Best Practices 2025
- Building SEO Tools with SERP API
- Keyword Research Automation
- Real-Time Search Data Applications
- API Documentation
About the Author: Lisa Chen was a Senior Developer at SEMrush for 7 years, where she built and maintained the rank tracking engine that monitors over 10 million keywords daily. She specializes in large-scale SEO tool development and distributed systems architecture. She now helps companies build their own SEO tools and optimize API integrations.
Build your rank tracker today. Try SERPpost free and track unlimited keywords across Google and Bing.