You can use our API to send individual queries or hold long-running conversations with chat models. You do not need to configure a system prompt for claim normalization tasks or for regular chat queries: our backend endpoints are pre-configured with custom system prompts that handle both generic and claim normalization requests. Queries run against a model of your choice, and you can use any model from the supported providers.
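
For example, a multi-turn conversation is carried by resending the prior turns in the messages list on each call. A minimal sketch, assuming the OpenAI-style chat interface used throughout this page:
from checkthat_ai import CheckThatAI
import os

client = CheckThatAI(api_key=os.getenv("OPENAI_API_KEY"))

# Keep the conversation history and resend it with every request
messages = [{"role": "user", "content": "Fact-check this claim: Honey never spoils"}]
first = client.chat.completions.create(model="gpt-4o", messages=messages)
messages.append({"role": "assistant", "content": first.choices[0].message.content})

# Ask a follow-up question in the same conversation
messages.append({"role": "user", "content": "What evidence supports that?"})
second = client.chat.completions.create(model="gpt-4o", messages=messages)
print(second.choices[0].message.content)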

Available Models

Retrieve a list of available models using the client.models.list() method:
from checkthat_ai import CheckThatAI
import os

# Initialize client with your provider API key
client = CheckThatAI(api_key=os.getenv("OPENAI_API_KEY"))

# Get all available models
models = client.models.list()

# Print models by provider
for provider in models.models_list:
    print(f"\n{provider['provider']} Models:")
    for model in provider['available_models']:
        print(f"  - {model['name']}: {model['model_id']}")
Models List Response
{
  "models_list": [
    {
      "provider": "OpenAI",
      "available_models": [
        {
          "name": "GPT-4o",
          "model_id": "gpt-4o",
          "description": "Most capable GPT-4 model, optimized for chat and code"
        },
        {
          "name": "GPT-5",
          "model_id": "gpt-5",
          "description": "Latest GPT-5 model with enhanced reasoning"
        }
      ]
    }
  ]
}
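
To pick a model programmatically, you can scan this listing for a matching model_id. A short sketch based on the response shape above; the find_model_id helper is illustrative, not part of the SDK:
def find_model_id(models, name):
    """Return the model_id for a display name, or None if not listed."""
    for provider in models.models_list:
        for model in provider["available_models"]:
            if model["name"] == name:
                return model["model_id"]
    return None

model_id = find_model_id(models, "GPT-4o")  # -> "gpt-4o"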

Non-Streaming Responses

Use non-streaming responses for standard chat interactions where you want to receive the complete response at once:
from checkthat_ai import CheckThatAI
import os

client = CheckThatAI(api_key=os.getenv("OPENAI_API_KEY"))

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "Fact-check this claim: Coffee consumption is linked to increased longevity"}
    ],
    temperature=0.1,  # Lower temperature for factual responses
    max_tokens=1000
)

print(response.choices[0].message.content)

Response Structure

{
  "id": "chatcmpl-abc123",
  "object": "chat.completion",
  "created": 1704067200,
  "model": "gpt-4o",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "Regular exercise provides numerous health benefits including improved cardiovascular health and enhanced mental well-being."
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 25,
    "completion_tokens": 18,
    "total_tokens": 43
  }
}
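
The SDK exposes these fields as attributes on the response object. A brief sketch, assuming the OpenAI-style response objects used in the examples above:
# Inspect why generation stopped and how many tokens were used
print(response.choices[0].finish_reason)  # e.g. "stop" or "length"
print(response.usage.total_tokens)        # prompt + completion tokens

# A finish_reason of "length" means the reply hit max_tokens and was truncated
if response.choices[0].finish_reason == "length":
    print("Response truncated; consider raising max_tokens.")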

Streaming Responses

Use streaming responses for real-time chat experiences where you want to display text as it’s generated:

Synchronous Streaming

from checkthat_ai import CheckThatAI
import os

client = CheckThatAI(api_key=os.getenv("OPENAI_API_KEY"))

# Enable streaming with stream=True
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "Tell me about the latest developments in renewable energy"}
    ],
    stream=True,
    temperature=0.7,
    max_tokens=1500
)

# Process streaming chunks
print("Streaming response: ", end="", flush=True)
for chunk in response:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print("\n")  # New line when complete

Asynchronous Streaming

import asyncio
import os
from checkthat_ai import AsyncCheckThatAI

async def stream_chat():
    client = AsyncCheckThatAI(api_key=os.getenv("ANTHROPIC_API_KEY"))
    
    try:
        stream = await client.chat.completions.create(
            model="claude-sonnet-4-2025-03-10",
            messages=[
                {"role": "user", "content": "Discuss the impact of artificial intelligence on society"}
            ],
            stream=True,
            temperature=0.8
        )
        
        print("AI Response: ", end="", flush=True)
        async for chunk in stream:
            if chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)
        print("\n")
        
    finally:
        await client.close()

# Run the async function
asyncio.run(stream_chat())

Streaming Response Format

Streaming Chunk Example
{
  "id": "chatcmpl-abc123",
  "object": "chat.completion.chunk",
  "created": 1704067200,
  "model": "gpt-4o",
  "choices": [
    {
      "index": 0,
      "delta": {
        "content": "Renewable energy has seen remarkable"
      },
      "finish_reason": null
    }
  ]
}

// Final chunk
{
  "id": "chatcmpl-abc123", 
  "object": "chat.completion.chunk",
  "created": 1704067200,
  "model": "gpt-4o",
  "choices": [
    {
      "index": 0,
      "delta": {},
      "finish_reason": "stop"
    }
  ]
}
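
You can watch finish_reason on each chunk to detect the final message and assemble the full text. A small sketch using the same chunk shape and streaming response as above:
full_text = []
for chunk in response:
    choice = chunk.choices[0]
    if choice.delta.content:
        full_text.append(choice.delta.content)
    if choice.finish_reason == "stop":
        break  # final chunk: delta is empty and generation is complete

print("".join(full_text))
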
Streaming Benefits: Streaming responses provide better user experience for long-form content, allow for real-time interaction, and can reduce perceived latency in chat applications.
Memory Management: When using streaming, especially with async operations, ensure you properly close clients and handle exceptions to prevent memory leaks.
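
If AsyncCheckThatAI supports the async context-manager protocol common to OpenAI-style SDKs (an assumption; fall back to the try/finally pattern shown above if it does not), cleanup can be expressed more compactly:
import asyncio
import os
from checkthat_ai import AsyncCheckThatAI

async def main():
    # Assumed: the client implements __aenter__/__aexit__ and closes itself on exit
    async with AsyncCheckThatAI(api_key=os.getenv("OPENAI_API_KEY")) as client:
        stream = await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Explain streaming responses briefly"}],
            stream=True,
        )
        async for chunk in stream:
            if chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)

asyncio.run(main())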