
SERP API for AI Agents & LLMs: Real-Time Search Integration Guide

Integrate real-time search capabilities into AI agents, LLMs, and chatbots. Complete guide for GPT, Claude, LangChain, AutoGPT with code examples and best practices.

Dr. Kevin Zhang, AI Research Lead at SERPpost

Large Language Models need real-time data. This comprehensive guide shows you how to give your AI agents, chatbots, and LLMs access to current search results through SERP APIs.

Why AI Agents Need SERP APIs

The Knowledge Cutoff Problem

# LLMs have a knowledge cutoff date
# (approximate training-data cutoffs; exact dates vary by model version)
llm_knowledge = {
    'GPT-4': 'April 2023',
    'Claude 3': 'August 2023',
    'Gemini': 'April 2023'
}

# Real-world queries need current data
user_queries = [
    "What's the current price of Bitcoin?",  # Changes every second
    "Who won the 2024 election?",            # After cutoff date
    "Latest iPhone features",                 # Product updates
    "Current weather in Tokyo",               # Real-time data
    "Today's news about AI"                   # Time-sensitive
]

# Solution: a SERP API provides real-time data
# (conceptual sketch: `serppost`, `extract_context`, and `llm` stand in
# for the concrete clients shown later in this guide)
def answer_with_current_data(query):
    # Get real-time search results
    search_results = serppost.search(query)
    
    # Augment LLM with current data
    context = extract_context(search_results)
    
    # Generate informed response
    response = llm.generate(
        prompt=query,
        context=context
    )
    
    return response

Retrieval-Augmented Generation (RAG)

/**
 * RAG Architecture with SERP API
 */

class RAGWithSERP {
  constructor(llm, serpAPI) {
    this.llm = llm;
    this.serpAPI = serpAPI;
  }
  
  async answer(question) {
    // Step 1: Retrieve relevant information
    const searchResults = await this.retrieve(question);
    
    // Step 2: Augment with context
    const context = this.augment(searchResults);
    
    // Step 3: Generate response
    const response = await this.generate(question, context);
    
    return {
      answer: response,
      sources: searchResults.map(r => r.link),
      // calculateConfidence is not shown; any heuristic over result
      // count and relevance scores works here
      confidence: this.calculateConfidence(searchResults)
    };
  }
  
  async retrieve(question) {
    /**
     * Retrieve relevant information from search
     */
    const results = await this.serpAPI.search({
      q: question,
      engine: 'google',
      num: 10
    });
    
    return results.organic_results;
  }
  
  augment(searchResults) {
    /**
     * Extract and format context from search results
     */
    return searchResults.map(result => ({
      title: result.title,
      snippet: result.snippet,
      url: result.link,
      // calculateRelevance is not shown; any scoring heuristic
      // (e.g. keyword overlap with the question) works here
      relevance: this.calculateRelevance(result)
    })).slice(0, 5);  // Top 5 most relevant
  }
  
  async generate(question, context) {
    /**
     * Generate response using LLM with context
     */
    const prompt = `
      Answer the following question using the provided context.
      If the context doesn't contain enough information, say so.
      
      Question: ${question}
      
      Context:
      ${context.map((c, i) => `
        [${i + 1}] ${c.title}
        ${c.snippet}
        Source: ${c.url}
      `).join('\n')}
      
      Answer:
    `;
    
    return await this.llm.complete(prompt);
  }
}

// Usage (gpt4 and serppostAPI are your configured LLM and SERPpost clients)
const rag = new RAGWithSERP(gpt4, serppostAPI);
const answer = await rag.answer("What are the latest developments in quantum computing?");

1. Direct LLM Integration

OpenAI GPT Integration

import openai  # pre-1.0 OpenAI SDK style (openai<1.0)
from serppost import SERPpost

class GPTWithSearch:
    def __init__(self, openai_key, serppost_key):
        self.openai = openai
        self.openai.api_key = openai_key
        self.serppost = SERPpost(serppost_key)
    
    async def chat_with_search(self, user_message):
        """
        GPT chat with real-time search capability
        """
        # Determine if search is needed
        needs_search = await self.should_search(user_message)
        
        if needs_search:
            # Get search results
            search_query = await self.extract_search_query(user_message)
            search_results = await self.serppost.search(search_query)
            
            # Format context
            context = self.format_search_context(search_results)
            
            # Generate response with context
            response = await self.openai.ChatCompletion.acreate(
                model="gpt-4",
                messages=[
                    {
                        "role": "system",
                        "content": "You are a helpful assistant with access to real-time search data."
                    },
                    {
                        "role": "user",
                        "content": f"Context from search:\n{context}\n\nUser question: {user_message}"
                    }
                ]
            )
        else:
            # Regular GPT response
            response = await self.openai.ChatCompletion.acreate(
                model="gpt-4",
                messages=[
                    {"role": "user", "content": user_message}
                ]
            )
        
        return response.choices[0].message.content
    
    async def should_search(self, message):
        """
        Determine if message requires real-time search
        """
        # Naive keyword heuristic; it over-triggers on general-knowledge
        # questions, so consider an LLM-based classifier in production
        search_indicators = [
            'current', 'latest', 'recent', 'today',
            'now', 'price', 'weather', 'news',
            'what is', 'who is', 'when did'
        ]
        
        return any(indicator in message.lower() for indicator in search_indicators)
    
    async def extract_search_query(self, message):
        """
        Extract optimal search query from user message
        """
        response = await self.openai.ChatCompletion.acreate(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "system",
                    "content": "Extract the best search query from the user's message. Return only the search query, nothing else."
                },
                {
                    "role": "user",
                    "content": message
                }
            ]
        )
        
        return response.choices[0].message.content.strip()
    
    def format_search_context(self, search_results):
        """
        Format search results as context for GPT
        """
        context = []
        
        for i, result in enumerate(search_results['organic_results'][:5]):
            context.append(f"""
[Source {i+1}]
Title: {result['title']}
Snippet: {result['snippet']}
URL: {result['link']}
            """)
        
        return '\n'.join(context)

# Example usage
gpt_search = GPTWithSearch(
    openai_key="sk-...",
    serppost_key="your_serppost_key"
)

response = await gpt_search.chat_with_search(
    "What's the current price of Tesla stock?"
)
print(response)

Anthropic Claude Integration

const Anthropic = require('@anthropic-ai/sdk');
const SERPpost = require('serppost');

class ClaudeWithSearch {
  constructor(anthropicKey, serppostKey) {
    this.claude = new Anthropic({ apiKey: anthropicKey });
    this.serppost = new SERPpost(serppostKey);
  }
  
  async chat(message, conversationHistory = []) {
    /**
     * Claude chat with search augmentation
     */
    
    // Check if search is needed
    const searchNeeded = this.needsSearch(message);
    
    let systemPrompt = "You are Claude, a helpful AI assistant.";
    let searchResults = null;  // Hoisted so sources are visible after the if block
    
    if (searchNeeded) {
      // Perform search
      const searchQuery = await this.extractSearchQuery(message);
      searchResults = await this.serppost.search({
        q: searchQuery,
        engine: 'google'
      });
      
      // Add search context to system prompt
      const searchContext = this.formatSearchResults(searchResults);
      systemPrompt += `\n\nYou have access to the following current information from search:\n${searchContext}`;
    }
    
    // Generate response
    const response = await this.claude.messages.create({
      model: 'claude-3-opus-20240229',
      max_tokens: 1024,
      system: systemPrompt,
      messages: [
        ...conversationHistory,
        { role: 'user', content: message }
      ]
    });
    
    return {
      response: response.content[0].text,
      searchUsed: searchNeeded,
      sources: searchNeeded ? this.extractSources(searchResults) : []
    };
  }
  
  needsSearch(message) {
    /**
     * Determine if message requires search
     */
    const patterns = [
      /what('s| is) (the )?(current|latest|recent)/i,
      /today('s)?/i,
      /right now/i,
      /as of (today|now)/i,
      /(price|cost|value) of/i
    ];
    
    return patterns.some(pattern => pattern.test(message));
  }
  
  formatSearchResults(results) {
    /**
     * Format search results for Claude
     */
    return results.organic_results
      .slice(0, 5)
      .map((result, i) => `
[${i + 1}] ${result.title}
${result.snippet}
Source: ${result.link}
      `)
      .join('\n');
  }
  
  async extractSearchQuery(message) {
    /**
     * Simple pass-through; swap in an LLM-based query rewriter if needed
     */
    return message;
  }
  
  extractSources(results) {
    /**
     * Collect source URLs from the top results
     */
    return results.organic_results.slice(0, 5).map(r => r.link);
  }
}

// Usage
const claude = new ClaudeWithSearch(
  'your_anthropic_key',
  'your_serppost_key'
);

const result = await claude.chat(
  "What are the latest developments in AI regulation?"
);

console.log(result.response);
console.log('Sources:', result.sources);

2. LangChain Integration

LangChain Tool Implementation

from langchain.tools import Tool
from langchain.agents import initialize_agent, AgentType
from langchain.llms import OpenAI
from serppost import SERPpost

class SERPpostTool:
    """
    LangChain tool for SERPpost API
    """
    
    def __init__(self, api_key):
        self.serppost = SERPpost(api_key)
    
    def search(self, query: str) -> str:
        """
        Search using SERPpost and return formatted results
        """
        try:
            results = self.serppost.search(query)
            
            # Format results for LLM consumption
            formatted = []
            for i, result in enumerate(results['organic_results'][:5]):
                formatted.append(f"""
Result {i+1}:
Title: {result['title']}
Snippet: {result['snippet']}
URL: {result['link']}
                """)
            
            return '\n'.join(formatted)
        except Exception as e:
            return f"Search error: {str(e)}"
    
    def get_tool(self):
        """
        Return LangChain Tool object
        """
        return Tool(
            name="Search",
            func=self.search,
            description="""
            Useful for when you need to answer questions about current events,
            prices, weather, news, or any information that requires real-time data.
            Input should be a search query string.
            """
        )

# Create LangChain agent with search capability
serppost_tool = SERPpostTool(api_key="your_serppost_key")

tools = [
    serppost_tool.get_tool(),
    # Add other tools as needed
]

llm = OpenAI(temperature=0)

agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)

# Use the agent
response = agent.run("What's the current weather in San Francisco?")
print(response)

Advanced LangChain RAG Chain

from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from serppost import SERPpost

class SERPRAGChain:
    """
    Advanced RAG chain using SERP results
    """
    
    def __init__(self, openai_key, serppost_key):
        self.serppost = SERPpost(serppost_key)
        self.embeddings = OpenAIEmbeddings(openai_api_key=openai_key)
        self.llm = OpenAI(temperature=0, openai_api_key=openai_key)
    
    async def query(self, question):
        """
        Query with RAG using SERP results
        """
        # Step 1: Get search results
        search_results = await self.serppost.search(question)
        
        # Step 2: Scrape full content from top results
        documents = await self.scrape_results(search_results)
        
        # Step 3: Split into chunks
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        splits = text_splitter.split_documents(documents)
        
        # Step 4: Create vector store
        vectorstore = FAISS.from_documents(splits, self.embeddings)
        
        # Step 5: Create retrieval chain
        qa_chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",
            retriever=vectorstore.as_retriever(
                search_kwargs={"k": 5}
            ),
            return_source_documents=True
        )
        
        # Step 6: Get answer
        result = qa_chain({"query": question})
        
        return {
            'answer': result['result'],
            'sources': [doc.metadata['source'] for doc in result['source_documents']]
        }
    
    async def scrape_results(self, search_results):
        """
        Scrape full content from search results
        """
        documents = []
        
        for result in search_results['organic_results'][:5]:
            try:
                # Use SERPpost web scraping API
                content = await self.serppost.scrape(result['link'])
                
                doc = Document(
                    page_content=content['text'],
                    metadata={
                        'source': result['link'],
                        'title': result['title']
                    }
                )
                documents.append(doc)
            except Exception as e:
                print(f"Failed to scrape {result['link']}: {e}")
        
        return documents

# Usage
rag = SERPRAGChain(
    openai_key="sk-...",
    serppost_key="your_serppost_key"
)

result = await rag.query("Explain the latest breakthroughs in quantum computing")
print(result['answer'])
print("Sources:", result['sources'])

3. AutoGPT & Autonomous Agents

AutoGPT Plugin

from typing import Any, Dict, List
import json

class SERPpostPlugin:
    """
    AutoGPT plugin for SERPpost
    """
    
    def __init__(self):
        self.name = "serppost_search"
        self.description = "Search the web for current information"
        self.serppost = None
    
    def can_handle_post_prompt(self) -> bool:
        return True
    
    def can_handle_on_planning(self) -> bool:
        return True
    
    def can_handle_post_planning(self, response: str) -> bool:
        return True
    
    def can_handle_pre_instruction(self) -> bool:
        return True
    
    def can_handle_on_instruction(self, messages: List[Dict[str, Any]]) -> bool:
        return True
    
    def can_handle_post_instruction(self, response: str) -> bool:
        return True
    
    def can_handle_pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> bool:
        return command_name == "serppost_search"
    
    def can_handle_post_command(
        self, command_name: str, response: str
    ) -> bool:
        return command_name == "serppost_search"
    
    def post_prompt(self, prompt: str) -> str:
        """
        Add search capability to prompt
        """
        return prompt + """
        
You have access to a search command:
- serppost_search: Search the web for current information
  Arguments: {"query": "search query"}
        """
    
    def on_planning(
        self, prompt: str, messages: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Add search planning capability
        """
        return messages
    
    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute search command
        """
        if command_name == "serppost_search":
            query = arguments.get("query", "")
            
            if not self.serppost:
                from serppost import SERPpost
                import os
                self.serppost = SERPpost(os.getenv("SERPPOST_API_KEY"))
            
            results = self.serppost.search(query)
            
            # Format results
            formatted = {
                "results": [
                    {
                        "title": r["title"],
                        "snippet": r["snippet"],
                        "url": r["link"]
                    }
                    for r in results["organic_results"][:5]
                ]
            }
            
            arguments["search_results"] = json.dumps(formatted)
        
        return arguments
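
The plugin is normally driven by AutoGPT's plugin loader, but you can exercise the search hook directly for testing. A minimal sketch, assuming SERPPOST_API_KEY is set (the value below is a placeholder):

import os

# Direct invocation for testing; in a real run AutoGPT's plugin
# loader calls these hooks automatically
os.environ.setdefault("SERPPOST_API_KEY", "your_serppost_key")

plugin = SERPpostPlugin()
enriched = plugin.pre_command("serppost_search", {"query": "latest AI news"})
print(enriched["search_results"])  # JSON string with the top 5 results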

Custom AI Agent with Tools

class AIAgentWithTools {
  constructor(llm, tools) {
    this.llm = llm;
    this.tools = tools;
    this.conversationHistory = [];
  }
  
  async run(task) {
    /**
     * Run autonomous agent with tool access
     */
    let iterations = 0;
    const maxIterations = 10;
    
    while (iterations < maxIterations) {
      // Get next action from LLM
      const action = await this.getNextAction(task);
      
      if (action.type === 'final_answer') {
        return action.content;
      }
      
      // Execute tool
      const toolResult = await this.executeTool(
        action.tool,
        action.arguments
      );
      
      // Add to history
      this.conversationHistory.push({
        action: action,
        result: toolResult
      });
      
      iterations++;
    }
    
    return "Max iterations reached";
  }
  
  async getNextAction(task) {
    /**
     * Determine next action using LLM
     */
    const prompt = this.buildPrompt(task);
    const response = await this.llm.complete(prompt);
    
    // parseAction (not shown) parses the Thought/Action/Arguments
    // format defined in buildPrompt below
    return this.parseAction(response);
  }
  
  buildPrompt(task) {
    /**
     * Build prompt with available tools and history
     */
    const toolDescriptions = this.tools.map(tool => `
- ${tool.name}: ${tool.description}
  Arguments: ${JSON.stringify(tool.arguments)}
    `).join('\n');
    
    const history = this.conversationHistory.map(h => `
Action: ${h.action.tool}(${JSON.stringify(h.action.arguments)})
Result: ${h.result}
    `).join('\n');
    
    return `
You are an AI agent with access to the following tools:
${toolDescriptions}

Task: ${task}

Previous actions:
${history}

What should you do next? Respond in this format:
Thought: [your reasoning]
Action: [tool_name]
Arguments: [JSON arguments]

Or if you have the final answer:
Thought: [your reasoning]
Final Answer: [the answer]
    `;
  }
  
  async executeTool(toolName, args) {
    /**
     * Execute specified tool
     * (`arguments` is reserved in strict-mode class bodies, so use `args`)
     */
    const tool = this.tools.find(t => t.name === toolName);
    
    if (!tool) {
      return `Error: Tool ${toolName} not found`;
    }
    
    try {
      return await tool.execute(args);
    } catch (error) {
      return `Error executing ${toolName}: ${error.message}`;
    }
  }
}

// Define search tool
const searchTool = {
  name: 'search',
  description: 'Search the web for current information',
  arguments: { query: 'string' },
  execute: async (args) => {
    const results = await serppostAPI.search(args.query);
    return JSON.stringify(results.organic_results.slice(0, 5));
  }
};

// Create and run agent
const agent = new AIAgentWithTools(gpt4, [searchTool]);
const result = await agent.run(
  "Find the current price of Bitcoin and explain why it changed"
);

4. Function Calling & Tool Use

OpenAI Function Calling

import json

import openai
from serppost import SERPpost

class GPTFunctionCalling:
    def __init__(self, openai_key, serppost_key):
        self.openai = openai
        self.openai.api_key = openai_key
        self.serppost = SERPpost(serppost_key)
    
    def get_functions(self):
        """
        Define available functions for GPT
        """
        return [
            {
                "name": "search_web",
                "description": "Search the web for current information",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "The search query"
                        },
                        "engine": {
                            "type": "string",
                            "enum": ["google", "bing"],
                            "description": "Search engine to use"
                        },
                        "num_results": {
                            "type": "integer",
                            "description": "Number of results to return",
                            "default": 5
                        }
                    },
                    "required": ["query"]
                }
            },
            {
                "name": "scrape_webpage",
                "description": "Scrape content from a specific webpage",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "url": {
                            "type": "string",
                            "description": "The URL to scrape"
                        }
                    },
                    "required": ["url"]
                }
            }
        ]
    
    async def chat(self, user_message):
        """
        Chat with function calling capability
        """
        messages = [
            {"role": "system", "content": "You are a helpful assistant with access to web search."},
            {"role": "user", "content": user_message}
        ]
        
        # Initial GPT call
        response = await self.openai.ChatCompletion.acreate(
            model="gpt-4",
            messages=messages,
            functions=self.get_functions(),
            function_call="auto"
        )
        
        message = response.choices[0].message
        
        # Check if GPT wants to call a function
        if message.get("function_call"):
            function_name = message["function_call"]["name"]
            function_args = json.loads(message["function_call"]["arguments"])
            
            # Execute function
            if function_name == "search_web":
                function_response = await self.search_web(**function_args)
            elif function_name == "scrape_webpage":
                function_response = await self.scrape_webpage(**function_args)
            
            # Add function response to messages
            messages.append(message)
            messages.append({
                "role": "function",
                "name": function_name,
                "content": json.dumps(function_response)
            })
            
            # Get final response from GPT
            final_response = await self.openai.ChatCompletion.acreate(
                model="gpt-4",
                messages=messages
            )
            
            return final_response.choices[0].message.content
        
        return message.content
    
    async def search_web(self, query, engine="google", num_results=5):
        """
        Execute web search
        """
        results = await self.serppost.search({
            "q": query,
            "engine": engine
        })
        
        return {
            "results": [
                {
                    "title": r["title"],
                    "snippet": r["snippet"],
                    "url": r["link"]
                }
                for r in results["organic_results"][:num_results]
            ]
        }
    
    async def scrape_webpage(self, url):
        """
        Scrape webpage content
        """
        content = await self.serppost.scrape(url)
        return {
            "url": url,
            "title": content.get("title", ""),
            "text": content.get("text", "")[:5000]  # Limit to 5000 chars
        }

# Usage
gpt = GPTFunctionCalling(
    openai_key="sk-...",
    serppost_key="your_serppost_key"
)

response = await gpt.chat("What are the top 3 AI news stories today?")
print(response)

Claude Tool Use

const Anthropic = require('@anthropic-ai/sdk');
const SERPpost = require('serppost');

class ClaudeToolUse {
  constructor(anthropicKey, serppostKey) {
    this.claude = new Anthropic({ apiKey: anthropicKey });
    this.serppost = new SERPpost(serppostKey);
  }
  
  getTools() {
    /**
     * Define tools for Claude
     */
    return [
      {
        name: 'search',
        description: 'Search the web for current information. Use this when you need up-to-date information.',
        input_schema: {
          type: 'object',
          properties: {
            query: {
              type: 'string',
              description: 'The search query'
            },
            engine: {
              type: 'string',
              enum: ['google', 'bing'],
              description: 'Search engine to use',
              default: 'google'
            }
          },
          required: ['query']
        }
      },
      {
        name: 'scrape',
        description: 'Extract content from a specific webpage',
        input_schema: {
          type: 'object',
          properties: {
            url: {
              type: 'string',
              description: 'The URL to scrape'
            }
          },
          required: ['url']
        }
      }
    ];
  }
  
  async chat(message) {
    /**
     * Chat with tool use capability
     */
    const messages = [{ role: 'user', content: message }];
    
    let response = await this.claude.messages.create({
      model: 'claude-3-opus-20240229',
      max_tokens: 4096,
      tools: this.getTools(),
      messages: messages
    });
    
    // Process tool use
    while (response.stop_reason === 'tool_use') {
      const toolUse = response.content.find(
        block => block.type === 'tool_use'
      );
      
      // Execute tool
      const toolResult = await this.executeTool(
        toolUse.name,
        toolUse.input
      );
      
      // Add tool result to messages
      messages.push({
        role: 'assistant',
        content: response.content
      });
      
      messages.push({
        role: 'user',
        content: [
          {
            type: 'tool_result',
            tool_use_id: toolUse.id,
            content: JSON.stringify(toolResult)
          }
        ]
      });
      
      // Get next response
      response = await this.claude.messages.create({
        model: 'claude-3-opus-20240229',
        max_tokens: 4096,
        tools: this.getTools(),
        messages: messages
      });
    }
    
    return response.content[0].text;
  }
  
  async executeTool(toolName, input) {
    /**
     * Execute specified tool
     */
    if (toolName === 'search') {
      const results = await this.serppost.search({
        q: input.query,
        engine: input.engine || 'google'
      });
      
      return {
        results: results.organic_results.slice(0, 5).map(r => ({
          title: r.title,
          snippet: r.snippet,
          url: r.link
        }))
      };
    }
    
    if (toolName === 'scrape') {
      const content = await this.serppost.scrape(input.url);
      return {
        url: input.url,
        title: content.title,
        text: content.text.substring(0, 5000)
      };
    }
    
    throw new Error(`Unknown tool: ${toolName}`);
  }
}

// Usage
const claude = new ClaudeToolUse(
  'your_anthropic_key',
  'your_serppost_key'
);

const response = await claude.chat(
  "Find information about the latest SpaceX launch and summarize it"
);
console.log(response);

5. Chatbot Integration

Custom Chatbot with Memory

from typing import List, Dict
import json
import uuid

from serppost import SERPpost

class ChatbotWithSearch:
    """
    Chatbot with search capability and conversation memory
    """
    
    def __init__(self, llm, serppost_key):
        self.llm = llm
        self.serppost = SERPpost(serppost_key)
        self.conversation_history = []
        self.search_cache = {}
        self.conversation_id = str(uuid.uuid4())
    
    async def chat(self, user_message: str) -> Dict:
        """
        Process user message and return response
        """
        # Add to history
        self.conversation_history.append({
            "role": "user",
            "content": user_message
        })
        
        # Determine if search is needed
        search_needed = await self.analyze_intent(user_message)
        
        search_results = None
        if search_needed:
            # Extract search query
            search_query = await self.extract_search_query(user_message)
            
            # Check cache
            if search_query in self.search_cache:
                search_results = self.search_cache[search_query]
            else:
                # Perform search
                search_results = await self.serppost.search(search_query)
                self.search_cache[search_query] = search_results
        
        # Generate response
        response = await self.generate_response(
            user_message,
            search_results
        )
        
        # Add to history
        self.conversation_history.append({
            "role": "assistant",
            "content": response,
            "search_used": search_needed,
            "sources": self.extract_sources(search_results) if search_results else []
        })
        
        return {
            "response": response,
            "search_used": search_needed,
            "sources": self.extract_sources(search_results) if search_results else [],
            "conversation_id": self.get_conversation_id()
        }
    
    async def analyze_intent(self, message: str) -> bool:
        """
        Analyze if message requires search
        """
        prompt = f"""
Analyze if this message requires real-time web search:
"{message}"

Consider:
- Does it ask about current events, prices, or time-sensitive information?
- Does it reference "today", "latest", "current", etc.?
- Can it be answered with general knowledge?

Respond with only "YES" or "NO".
        """
        
        response = await self.llm.complete(prompt)
        return "YES" in response.upper()
    
    async def extract_search_query(self, message: str) -> str:
        """
        Extract optimal search query
        """
        prompt = f"""
Extract the best search query from this message:
"{message}"

Return only the search query, nothing else.
        """
        
        response = await self.llm.complete(prompt)
        return response.strip()
    
    async def generate_response(
        self,
        user_message: str,
        search_results: Dict = None
    ) -> str:
        """
        Generate chatbot response
        """
        # Build context from conversation history
        context = self.build_context()
        
        # Add search results if available
        if search_results:
            search_context = self.format_search_results(search_results)
            context += f"\n\nCurrent information from search:\n{search_context}"
        
        # Generate response (leave "Assistant:" open for the LLM to complete)
        prompt = f"""
{context}

User: {user_message}

Assistant:"""
        
        response = await self.llm.complete(prompt)
        return response.strip()
    
    def build_context(self) -> str:
        """Build context from conversation history"""
        recent_history = self.conversation_history[-10:]  # Last 10 messages
        return "\n".join([
            f"{msg['role'].capitalize()}: {msg['content']}"
            for msg in recent_history
        ])
    
    def format_search_results(self, results: Dict) -> str:
        """Format search results for context"""
        formatted = []
        for i, result in enumerate(results['organic_results'][:3]):
            formatted.append(f"[{i+1}] {result['title']}: {result['snippet']}")
        return "\n".join(formatted)
    
    def extract_sources(self, results: Dict) -> List[str]:
        """Extract source URLs"""
        if not results:
            return []
        return [r['link'] for r in results['organic_results'][:3]]
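
A minimal usage sketch for the chatbot above. The llm argument is a placeholder for any client exposing an async complete(prompt) method, which is what the class calls internally:

import asyncio

async def main():
    # my_llm_client is a hypothetical async LLM wrapper
    bot = ChatbotWithSearch(llm=my_llm_client, serppost_key="your_serppost_key")
    result = await bot.chat("What's the latest news about AI regulation?")
    print(result["response"])
    print("Sources:", result["sources"])

asyncio.run(main())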

6. Best Practices for AI Integration

Caching Strategy

class SmartSearchCache {
  constructor(ttl = 3600) {
    this.cache = new Map();
    this.ttl = ttl * 1000; // Convert to ms
  }
  
  async get(query, searchFunc) {
    const key = this.normalizeQuery(query);
    const cached = this.cache.get(key);
    
    if (cached && Date.now() - cached.timestamp < this.ttl) {
      return { ...cached.data, fromCache: true };
    }
    
    // Fetch fresh data
    const data = await searchFunc(query);
    this.cache.set(key, {
      data,
      timestamp: Date.now()
    });
    
    return { ...data, fromCache: false };
  }
  
  normalizeQuery(query) {
    return query.toLowerCase().trim().replace(/\s+/g, ' ');
  }
}

Cost Optimization

from serppost import SERPpost

class CostOptimizedSearch:
    """Optimize search costs for AI applications"""
    
    def __init__(self, serppost_key, budget_per_day=100):
        self.serppost = SERPpost(serppost_key)
        self.budget_per_day = budget_per_day
        self.daily_spend = 0    # Reset on a daily schedule in production
        self.request_count = 0
    
    async def search(self, query):
        """Search with cost tracking"""
        cost_per_request = 0.004  # Example rate; substitute your plan's pricing
        
        if self.daily_spend + cost_per_request > self.budget_per_day:
            raise Exception("Daily budget exceeded")
        
        results = await self.serppost.search(query)
        self.daily_spend += cost_per_request
        self.request_count += 1
        
        return results
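
Caching and budgeting compose naturally: serve repeated queries from the cache so only cache misses spend budget. A sketch under that assumption (the date-rollover reset is illustrative; a production service might reset via a scheduler instead):

from datetime import date

class BudgetedCachedSearch(CostOptimizedSearch):
    """Check the cache before spending budget; reset spend daily"""
    
    def __init__(self, serppost_key, budget_per_day=100):
        super().__init__(serppost_key, budget_per_day)
        self.cache = {}
        self.spend_date = date.today()
    
    async def search(self, query):
        # Reset the daily counter when the date rolls over
        if date.today() != self.spend_date:
            self.daily_spend = 0
            self.spend_date = date.today()
        
        key = query.lower().strip()
        if key in self.cache:
            return self.cache[key]  # Cache hit: no API cost
        
        results = await super().search(query)  # Counts against the budget
        self.cache[key] = results
        return results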

7. Production Deployment

Monitoring & Logging

import logging
from datetime import datetime

class AISearchMonitor:
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.metrics = {
            'total_searches': 0,
            'cache_hits': 0,
            'errors': 0,
            'avg_latency': 0
        }
    
    async def monitored_search(self, query, search_func):
        start_time = datetime.now()
        
        try:
            result = await search_func(query)
            self.metrics['total_searches'] += 1
            
            latency = (datetime.now() - start_time).total_seconds()
            self.update_latency(latency)
            
            self.logger.info(f"Search successful: {query} ({latency}s)")
            return result
            
        except Exception as e:
            self.metrics['errors'] += 1
            self.logger.error(f"Search failed: {query} - {str(e)}")
            raise
    
    def update_latency(self, latency):
        """Maintain a running average of search latency"""
        n = self.metrics['total_searches']  # Already incremented for this search
        prev = self.metrics['avg_latency']
        self.metrics['avg_latency'] = prev + (latency - prev) / n
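
A usage sketch, run inside an async context, wrapping the same search client used throughout this guide:

monitor = AISearchMonitor()

# search_func is any async callable that takes a query string,
# e.g. serppost.search from the earlier examples
results = await monitor.monitored_search("current Bitcoin price", serppost.search)
print(monitor.metrics)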

Conclusion

Key Takeaways

  • RAG is Essential: Combine LLMs with real-time search for accurate, current responses
  • Function Calling: Use native function calling for seamless tool integration
  • Caching Saves Money: Implement smart caching; for repetitive query workloads it can cut API costs substantially
  • Monitor Everything: Track usage, costs, and performance in production

Quick Start

  1. Get API key - Free 1,000 requests (see the minimal example below)
  2. Read docs - Integration guides
  3. Try playground - Test before coding
  4. View blog - Learn from tutorials and guides
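
A minimal first request, assuming the Python client used throughout this guide (SERPpost(api_key) with a search(query) method):

from serppost import SERPpost

client = SERPpost("your_serppost_key")
results = client.search("latest AI news")

for r in results["organic_results"][:3]:
    print(r["title"], "-", r["link"])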

About the Author: Dr. Kevin Zhang is the AI Research Lead at SERPpost with a PhD in Machine Learning from Stanford. He specializes in RAG systems, LLM optimization, and has published 15+ papers on information retrieval and natural language processing.

Ready to power your AI with real-time search? Start building with SERPpost today.

Tags:

#AI #LLM #GPT #Claude #LangChain #AIAgents #RAG

Ready to try SERPpost?

Get started with 100 free credits. No credit card required.