Skip to main content
The polyvia Python package wraps the entire REST API in a typed, IDE-friendly client and provides first-class support for connecting to the Polyvia MCP server from any AI framework.
pip install polyvia
Requires Python 3.9+. For LangChain agent support, install the extra:
pip install "polyvia[langchain]"

Quick Start

from polyvia import Polyvia

client = Polyvia(api_key="poly_...")

# Ingest → wait → query
result = client.ingest.file("report.pdf", name="Q4 Report")
client.ingest.wait(result.task_id)
print(client.query("What are the key findings?").answer)
Or set POLYVIA_API_KEY in your environment and omit the argument:
export POLYVIA_API_KEY=poly_...
client = Polyvia()

MCP Server

client.mcp returns an MCPConfig object with a helper for every major client:
| Method | Use with |
| --- | --- |
| `to_anthropic_mcp_server()` | `ant.beta.messages.create(mcp_servers=[...])` |
| `to_openai_responses_tool()` | `oai.responses.create(tools=[...])` |
| `to_openai_mcp_server()` | OpenAI Agents SDK `MCPServerStreamableHTTP` |
| `to_claude_desktop_config()` | `~/.claude/claude_desktop_config.json` |

Anthropic beta MCP client

from anthropic import Anthropic
from polyvia import Polyvia

polyvia = Polyvia(api_key="poly_...")
ant     = Anthropic()

response = ant.beta.messages.create(
    model="claude-opus-4-5",
    max_tokens=1000,
    messages=[{"role": "user", "content": "What are my Q4 findings?"}],
    mcp_servers=[polyvia.mcp.to_anthropic_mcp_server()],
    betas=["mcp-client-2025-04-04"],
)
print(response.content[0].text)

OpenAI Responses API

from openai import OpenAI
from polyvia import Polyvia

polyvia = Polyvia(api_key="poly_...")
oai     = OpenAI()

response = oai.responses.create(
    model="gpt-4o",
    tools=[polyvia.mcp.to_openai_responses_tool()],
    input="What are my Q4 findings?",
)
print(response.output_text)

OpenAI Agents SDK

from agents import Agent, Runner
from agents.mcp import MCPServerStreamableHTTP
from polyvia import Polyvia

polyvia = Polyvia(api_key="poly_...")
cfg = polyvia.mcp.to_openai_mcp_server()

server = MCPServerStreamableHTTP(url=cfg["url"], headers=cfg["headers"])
agent  = Agent(name="Research", mcp_servers=[server])
result = Runner.run_sync(agent, "What do my Q4 reports say about revenue?")
print(result.final_output)

Claude Desktop

Print a snippet to copy-paste into ~/.claude/claude_desktop_config.json:
client.mcp.print_claude_desktop_snippet()
Or wire it up programmatically:
import json, pathlib

cfg_path = pathlib.Path.home() / ".claude" / "claude_desktop_config.json"
config = json.loads(cfg_path.read_text()) if cfg_path.exists() else {}
config.setdefault("mcpServers", {})["polyvia"] = client.mcp.to_claude_desktop_config()
cfg_path.write_text(json.dumps(config, indent=2))

Agent Tools (programmatic)

If you’d rather manage the tool-dispatch loop yourself — or your framework doesn’t support remote MCP — use client.tools to get JSON-schema tool definitions and an executor. All 10 Polyvia tools are available: ingest, status, list/get/update/delete documents, list/create/delete groups, and query.

Anthropic Messages API

import anthropic
from polyvia import Polyvia

client = Polyvia(api_key="poly_...")
ant    = anthropic.Anthropic()

tools, call = client.tools.anthropic()

response = ant.messages.create(
    model="claude-opus-4-5",
    max_tokens=2048,
    messages=[{"role": "user", "content": "Summarise my Finance documents."}],
    tools=tools,
)

for block in response.content:
    if block.type == "tool_use":
        result = call(block.name, block.input)
        print(result)

OpenAI ChatCompletion

import json
from openai import OpenAI
from polyvia import Polyvia

client = Polyvia(api_key="poly_...")
oai    = OpenAI()

tools, call = client.tools.openai()

response = oai.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What are my Q4 findings?"}],
    tools=tools,
)

for tc in response.choices[0].message.tool_calls or []:
    result = call(tc.function.name, json.loads(tc.function.arguments))
    print(result)

LangChain

Requires pip install "polyvia[langchain]".
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from polyvia import Polyvia

client = Polyvia(api_key="poly_...")
tools  = client.tools.langchain()

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant with access to a document workspace."),
    ("user", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
])
agent    = create_tool_calling_agent(ChatOpenAI(model="gpt-4o"), tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
executor.invoke({"input": "What risks are mentioned in my reports?"})

Async Client

Every method on AsyncPolyvia is a coroutine — same API surface as the sync client.
import asyncio
from polyvia import AsyncPolyvia

async def main():
    async with AsyncPolyvia(api_key="poly_...") as client:
        result = await client.ingest.file("report.pdf")
        await client.ingest.wait(result.task_id)
        answer = await client.query("Key findings?")
        print(answer.answer)

asyncio.run(main())

Error Handling

from polyvia import (
    AuthenticationError,   # 401 — bad or missing API key
    ForbiddenError,        # 403 — document belongs to another user
    NotFoundError,         # 404 — document, group, or task not found
    RateLimitError,        # 429 — too many requests
    IngestionError,        # task finished with status='failed'
    IngestionTimeout,      # ingest.wait() exceeded its timeout
)

try:
    done = client.ingest.wait(task_id, timeout=60)
except IngestionError as e:
    print(f"Parsing failed: {e.error}")
except IngestionTimeout:
    print("Timed out — document may still be processing")
except RateLimitError:
    print("Rate limit hit — back off and retry")
except NotFoundError:
    print("Document or task not found")
except AuthenticationError:
    print("Invalid API key")

PyPI

pip install polyvia

GitHub

Source code and examples