Examples

Eighteen ready-to-run patterns covering the most common retrievalagent use cases.


1. Knowledge base Q&A (InMemory, no external services)

from retrievalagent import Agent, InMemoryBackend
import hashlib

def embed(text: str) -> list[float]:
    # Toy deterministic embedding derived from a hash (demo only, not semantic).
    h = hashlib.sha256(text.encode()).digest()
    return [b / 255.0 for b in h[:8]]

backend = InMemoryBackend(embed_fn=embed)
backend.add_documents([
    {"id": "1", "content": "Hybrid search combines BM25 and vector retrieval."},
    {"id": "2", "content": "RRF fusion ranks results by reciprocal rank."},
])

rag = Agent(index="kb", backend=backend, auto_strategy=False)
state = rag.invoke("What is hybrid search?")
print(state.answer)

2. Retrieve documents without generating an answer

from retrievalagent import init_agent

rag = init_agent("docs", model="openai:gpt-5.4", backend="qdrant",
                 backend_url="http://localhost:6333")

query, docs = rag.retrieve_documents("What is retrieval-augmented generation?")
for doc in docs:
    print(doc.page_content[:100])

3. Multi-turn chat

from retrievalagent import init_agent, ConversationTurn

rag = init_agent("docs", model="openai:gpt-5.4", backend="qdrant",
                 backend_url="http://localhost:6333")

history: list[ConversationTurn] = []

for question in ["What is RAG?", "How does it compare to fine-tuning?"]:
    state = rag.chat(question, history)
    history.append(ConversationTurn(question=question, answer=state.answer))
    print(f"Q: {question}\nA: {state.answer}\n")

4. Always-on filter (e-commerce in-stock)

from retrievalagent import init_agent

rag = init_agent(
    "products",
    model="openai:gpt-5.4",
    backend="meilisearch",
    filter="in_stock = true",
)

state = rag.invoke("cordless drill under 100 euros")
print(state.answer)

5. Always-on filter (exclude discontinued items)

from retrievalagent import init_agent

rag = init_agent(
    "products",
    model="openai:gpt-5.4",
    backend="meilisearch",
    filter="category != 'discontinued'",
)

state = rag.invoke("Find alternatives to brake cleaner 500ml")
print(state.answer)

6. Async usage (FastAPI / Databricks / Jupyter)

import asyncio
from retrievalagent import init_agent

async def main():
    rag = init_agent("kb", model="openai:gpt-5.4", backend="qdrant",
                     backend_url="http://localhost:6333")
    state = await rag.ainvoke("What is hybrid search?")
    print(state.answer)

asyncio.run(main())

rag.invoke() also works from Jupyter / Databricks without asyncio.run() — retrievalagent handles running-loop detection automatically.
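For example, in a notebook cell (same API as above, no asyncio.run() needed):

from retrievalagent import init_agent

rag = init_agent("kb", model="openai:gpt-5.4", backend="qdrant",
                 backend_url="http://localhost:6333")
state = rag.invoke("What is hybrid search?")   # works inside Jupyter's running loop
print(state.answer)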


7. Tool-calling agent — dynamic filter discovery

from retrievalagent import init_agent

rag = init_agent("products", model="openai:gpt-5.4", backend="meilisearch")

# The LLM inspects the index schema, samples field values, and builds
# the filter expression itself — no hardcoded filter needed.
result = rag.invoke_agent("Show me Bosch products under 50 euros in stock")
print(result)

8. Streaming the final answer

import asyncio
from retrievalagent import init_agent

async def main():
    rag = init_agent("docs", model="openai:gpt-5.4", backend="qdrant",
                     backend_url="http://localhost:6333")
    async for chunk in rag.astream("Explain retrieval-augmented generation"):
        print(chunk, end="", flush=True)

asyncio.run(main())

9. Qdrant with metadata filter

from retrievalagent import Agent
from retrievalagent.backend import QdrantBackend, SearchRequest
from qdrant_client.models import FieldCondition, MatchValue, Filter

backend = QdrantBackend(
    "docs",
    url="http://localhost:6333",
    embed_fn=my_embed_fn,   # your embedding function
)

rag = Agent(index="docs", backend=backend)

# Direct backend search with a native Qdrant filter:
hits = backend.search(SearchRequest(
    query="hybrid search",
    limit=5,
    filter_expr=Filter(must=[FieldCondition(key="category", match=MatchValue(value="tech"))]),
))

10. Custom instructions (tone / domain)

from retrievalagent import init_agent

rag = init_agent(
    "legal-docs",
    model="openai:gpt-5.4",
    backend="qdrant",
    backend_url="http://localhost:6333",
    instructions=(
        "You are a legal assistant. Always cite the specific clause or section. "
        "Use formal language and note when something requires professional legal advice."
    ),
)

state = rag.invoke("What are the termination conditions in the SLA?")
print(state.answer)

11. Auto-tune for your corpus

Optuna explores ~20 retrieval knobs against a hand-crafted testset and saves the winning config as TOML.

from retrievalagent import MeilisearchBackend
from retrievalagent.tuner import RAGTuner, load_testset
from retrievalagent.utils import _make_azure_embed_fn

tuner = RAGTuner(
    backend_factory=lambda: MeilisearchBackend("products"),
    embed_fn=_make_azure_embed_fn(),
    hit_cases=load_testset("testset.json"),
    eval_k=5,
)

best = tuner.tune(n_trials=50, patience=8)   # stops after 8 trials with no improvement
best.save_toml("retrievalagent.config.toml")

Or from the CLI:

python -m retrievalagent.tuner --index products --hits testset.json --trials 50

12. Load auto-discovered config

RAGConfig.auto() walks retrievalagent.config.toml → pyproject.toml [tool.retrievalagent] → env vars → defaults.

from retrievalagent import AgenticRAG, RAGConfig, MeilisearchBackend

rag = AgenticRAG(
    index="products",
    backend=MeilisearchBackend("products"),
    embed_fn=my_embed_fn,
    config=RAGConfig.auto(),
)

13. Mix LLM tiers — cheap weak, strong generation

Use a small model for high-frequency calls (preprocess, filter-intent) and your best model only for the final answer.

from retrievalagent import AgenticRAG, RAGConfig, MeilisearchBackend

cfg = RAGConfig(
    strong_model="azure:gpt-5.4",
    weak_model="azure:gpt-5.4-mini",     # preprocess / quality / filter-intent
    thinking_model="azure:gpt-5.4-mini", # per-doc reasoning
)

rag = AgenticRAG(
    index="products",
    backend=MeilisearchBackend("products"),
    embed_fn=my_embed_fn,
    config=cfg,
)

Model strings resolve through langchain.chat_models.init_chat_model, so any provider:model string works: azure:, openai:, anthropic:, bedrock:, ollama:, etc.
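
As a quick check, the same provider:model string can be resolved directly with the LangChain helper (model name as used above):

from langchain.chat_models import init_chat_model

# "provider:model" selects the matching chat-model integration.
llm = init_chat_model("openai:gpt-5.4")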


14. Disable optional stages in TOML

Commit a [tool.retrievalagent] block in pyproject.toml to disable stages your corpus doesn't need:

[tool.retrievalagent]
semantic_ratio = 0.4
fusion = "dbsf"

disable = ["bm25_fallback_threshold", "expert_threshold"]
enable_preprocess_llm = false   # skip LLM preprocess for short-keyword product queries

TOML has no null; disable = [...] tells retrievalagent to set those fields to None on load — distinct from "use default" (just omit the field).
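
A minimal sketch of the distinction, assuming the TOML keys above map one-to-one to RAGConfig attributes:

from retrievalagent import RAGConfig

cfg = RAGConfig.auto()
print(cfg.semantic_ratio)            # 0.4 (set explicitly)
print(cfg.fusion)                    # "dbsf" (set explicitly)
print(cfg.bm25_fallback_threshold)   # None (listed in disable)
# expert_threshold is None too; a field omitted entirely keeps its library default.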


15. Multi-turn with follow-up context

retrievalagent rewrites short follow-ups using prior-turn context before retrieval.

from retrievalagent import init_agent, ConversationTurn

rag = init_agent("products", model="azure:gpt-5.4", backend="meilisearch")

s1 = rag.chat("Makita Akku Bohrhammer 18V", history=[])
history = [ConversationTurn(question="Makita Akku Bohrhammer 18V", answer=s1.answer)]

# "und die 36V Version?" is rewritten to "Makita Akku Bohrhammer 36V" before retrieval
s2 = rag.chat("und die 36V Version?", history=history)
print(s2.answer)

16. Multilingual filter intent (DE / FR / IT / EN)

Lowercase queries with filter-intent words (von, de, di, from, ohne, sans, senza, without, …) trigger LLM filter extraction.

from retrievalagent import init_agent

rag = init_agent("products", model="azure:gpt-5.4", backend="meilisearch")

queries = [
    "trockenbeton von fixit",          # DE — "from Fixit"
    "ciment de fixit",                 # FR
    "cemento di fixit",                # IT
    "concrete from fixit",             # EN
    "rohre ohne pvc",                  # DE negation — "without PVC"
]
for q in queries:
    _, docs = rag.retrieve_documents(q, top_k=1)
    print(q, "→", docs[0].metadata.get("supplier_name"))

17. Measure and weight latency in your eval

RAGTuner tracks per-query wall time and exposes mean_latency_ms, speed, and combined_prod metrics.

from retrievalagent.tuner import RAGTuner, load_testset

tuner = RAGTuner(
    backend_factory=lambda: my_backend,
    embed_fn=my_embed_fn,
    hit_cases=load_testset("testset.json"),
    latency_weight=0.25,
    latency_budget_ms=1200,     # queries over this budget get linear penalty
)
best = tuner.tune(n_trials=30)

print(f"hit@{tuner.eval_k}: {best.overrides()}")

18. Inspect the auto-init cache

retrievalagent caches the LLM's auto-strategy result per schema-fingerprint, so repeat initializations against the same corpus skip the LLM call.

from pathlib import Path
import json

cache_dir = Path.home() / ".cache" / "retrievalagent"
for f in cache_dir.glob("auto_*.json"):
    strategy = json.loads(f.read_text())
    print(f"{f.stem}:")
    print(f"  semantic_ratio: {strategy.get('semantic_ratio')}")
    print(f"  fusion:         {strategy.get('fusion')}")
    print(f"  domain_hint:    {strategy.get('domain_hint','')[:80]}")

Delete any file to force a fresh LLM call on next init — useful after schema changes that the fingerprint doesn't catch.
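
For example, to clear the whole cache (reusing cache_dir from the snippet above):

# Remove every cached strategy; the next init re-runs auto-strategy detection.
for f in cache_dir.glob("auto_*.json"):
    f.unlink()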