Mavera API — Python SDK Guide (OpenAI-compatible)

Installation

pip install openai

Configuration

from openai import OpenAI

# Point the standard OpenAI SDK at Mavera's OpenAI-compatible endpoint.
# NOTE: a hard-coded key is shown for brevity only — prefer an env var.
client = OpenAI(
    base_url="https://app.mavera.io/api/v1",
    api_key="mvra_live_your_key_here",
)
For security, store your API key in an environment variable (e.g. MAVERA_API_KEY) instead of hard-coding it:
import os
from openai import OpenAI

# Read the key from the environment; a KeyError here means it is not set.
client = OpenAI(
    base_url="https://app.mavera.io/api/v1",
    api_key=os.environ["MAVERA_API_KEY"],
)

Chat Completions

Basic Usage

# Ask a persona a question through the OpenAI-compatible Chat Completions API.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What are the latest trends in AI?"},
]
response = client.chat.completions.create(
    model="mavera-1",
    messages=messages,
    # Mavera-specific routing field, passed via the SDK's extra_body hook.
    extra_body={"persona_id": "YOUR_PERSONA_ID"},
)

print(response.choices[0].message.content)
# NOTE(review): `credits_used` is a Mavera extension on the usage object,
# not part of the standard CompletionUsage model — confirm against the
# Mavera API reference.
print(f"Credits used: {response.usage.credits_used}")

Streaming

# Stream tokens as they are generated instead of waiting for the full reply.
stream = client.chat.completions.create(
    model="mavera-1",
    messages=[{"role": "user", "content": "Write a short story"}],
    extra_body={"persona_id": "YOUR_PERSONA_ID"},
    stream=True,
)

for chunk in stream:
    content = chunk.choices[0].delta.content
    # delta.content is None for role-only / metadata deltas — skip those.
    if content:
        print(content, end="", flush=True)

With Analysis Mode

# Request an answer together with Mavera's analysis metadata.
response = client.chat.completions.create(
    model="mavera-1",
    messages=[{"role": "user", "content": "How do millennials feel about remote work?"}],
    extra_body={
        "persona_id": "YOUR_PERSONA_ID",
        "analysis_mode": True,        # ask the API to attach analysis data
        "reasoning_effort": "high",
    },
)

# Access analysis data
# NOTE(review): `analysis` is a Mavera extension field, not part of the
# standard ChatCompletion model — presumably surfaced through the SDK's
# extra-field handling; confirm against the Mavera API reference.
analysis = response.analysis
print(f"Confidence: {analysis['confidence']}/10")

REST API Endpoints

For the non-Chat REST endpoints, use a general-purpose HTTP client such as `requests` or `httpx`:
import requests

# Shared auth header and base URL for the raw REST endpoints.
headers = {"Authorization": "Bearer mvra_live_your_key_here"}
base_url = "https://app.mavera.io/api/v1"

# List personas.  Always pass `timeout`: requests has no default timeout and
# will otherwise hang forever on a stalled connection.  raise_for_status()
# surfaces HTTP errors before we try to parse the body as JSON.
response = requests.get(f"{base_url}/personas", headers=headers, timeout=30)
response.raise_for_status()
personas = response.json()["data"]

# Mave Agent
response = requests.post(
    f"{base_url}/mave/chat",
    headers=headers,
    json={"message": "Analyze the EV market"},
    timeout=60,
)
response.raise_for_status()
result = response.json()

# Create Focus Group
response = requests.post(
    f"{base_url}/focus-groups",
    headers=headers,
    json={
        "name": "Product Feedback",
        "sample_size": 50,
        "persona_ids": ["persona_1", "persona_2"],
        "questions": [...],  # placeholder — supply your question list here
    },
    timeout=60,
)
response.raise_for_status()

Async Support

import asyncio
from openai import AsyncOpenAI

# Async client configured against Mavera's OpenAI-compatible endpoint.
client = AsyncOpenAI(
    base_url="https://app.mavera.io/api/v1",
    api_key="mvra_live_your_key_here",
)


async def main():
    """Send a single chat request and print the persona's reply."""
    completion = await client.chat.completions.create(
        model="mavera-1",
        messages=[{"role": "user", "content": "Hello!"}],
        extra_body={"persona_id": "YOUR_PERSONA_ID"},
    )
    print(completion.choices[0].message.content)


asyncio.run(main())

Error Handling

from openai import APIError, APIStatusError, AuthenticationError, RateLimitError

try:
    response = client.chat.completions.create(...)
except AuthenticationError:
    print("Invalid API key")
except RateLimitError:
    print("Rate limited - implement backoff")
except APIStatusError as e:
    # Only APIStatusError (and subclasses) carries `status_code`.  The
    # APIError base class covers connection-level failures that have no
    # HTTP status, so reading e.status_code there would itself raise
    # AttributeError inside the handler.
    if e.status_code == 402:
        print("Insufficient credits")
    else:
        print(f"API error: {e}")
except APIError as e:
    # Non-HTTP failures: connection problems, timeouts, malformed responses.
    print(f"API error: {e}")

Type Hints

The OpenAI SDK includes full type hints:
from openai import OpenAI
from openai.types.chat import ChatCompletion

client = OpenAI(
    api_key="mvra_live_your_key_here",
    base_url="https://app.mavera.io/api/v1",
)

# Annotating the result as ChatCompletion gives full editor/type-checker
# support for the response object.
response: ChatCompletion = client.chat.completions.create(
    model="mavera-1",
    messages=[{"role": "user", "content": "Hello"}],
    # Consistent with every other example: Mavera requests are routed
    # through a persona via extra_body.
    extra_body={"persona_id": "YOUR_PERSONA_ID"},
)