API Reference
Debugging Requests
Learn how to debug API requests and troubleshoot issues
Debugging Requests
This guide will help you debug API requests and troubleshoot common issues.
Request Logging
Enable Request Logging
Add verbose logging to see detailed request information:
import logging
from Jan import Jan
# Enable debug logging
logging.basicConfig(level=logging.DEBUG)
client = Jan(api_key="your-api-key")
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello"}]
)
Request/Response Headers
Log all request and response headers:
import requests
from Jan import Jan
# Custom session with logging
session = requests.Session()
# Add request logging
def log_request(req):
print(f"Request: {req.method} {req.url}")
print(f"Headers: {dict(req.headers)}")
if req.body:
print(f"Body: {req.body.decode()}")
# Add response logging
def log_response(resp):
print(f"Response: {resp.status_code}")
print(f"Headers: {dict(resp.headers)}")
print(f"Body: {resp.text}")
session.hooks['response'] = log_response
client = Jan(api_key="your-api-key", session=session)
Common Issues
1. Authentication Errors
Error: 401 Unauthorized
Debug Steps:
# Check API key format
api_key = "sk-Jan-1234567890abcdef"
print(f"API key length: {len(api_key)}")
print(f"API key starts with: {api_key[:8]}")
# Test authentication
from Jan import Jan, APIError
try:
client = Jan(api_key=api_key)
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "test"}]
)
print("Authentication successful")
except APIError as e:
print(f"Authentication failed: {e.message}")
print(f"Error code: {e.code}")
Common Causes:
- Invalid API key
- Expired API key
- Missing Authorization header
- Incorrect header format
2. Rate Limiting
Error: 429 Too Many Requests
Debug Steps:
import time
from Jan import Jan, APIError
client = Jan(api_key="your-api-key")
# Check rate limit headers
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "test"}]
)
print(f"Rate limit: {response.headers.get('X-RateLimit-Limit')}")
print(f"Remaining: {response.headers.get('X-RateLimit-Remaining')}")
print(f"Reset time: {response.headers.get('X-RateLimit-Reset')}")
Handling Rate Limits:
def make_request_with_retry(client, max_retries=3):
for attempt in range(max_retries):
try:
return client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "test"}]
)
except APIError as e:
if e.code == "rate_limit_exceeded" and attempt < max_retries - 1:
wait_time = 2 ** attempt # Exponential backoff
print(f"Rate limited, waiting {wait_time} seconds...")
time.sleep(wait_time)
else:
raise
3. Model Not Found
Error: 400 Bad Request with model_not_found
Debug Steps:
# List available models
models = client.models.list()
print("Available models:")
for model in models.data:
print(f"- {model.id}")
# Check model name
model_name = "gpt-4"
if model_name not in [m.id for m in models.data]:
print(f"Model '{model_name}' not found")
print("Available models:", [m.id for m in models.data])
4. Invalid Parameters
Error: 400 Bad Request with invalid_parameter
Debug Steps:
# Validate request parameters
request_data = {
"model": "gpt-4",
"messages": [{"role": "user", "content": "Hello"}],
"max_tokens": 100,
"temperature": 0.7
}
# Check required fields
required_fields = ["model", "messages"]
for field in required_fields:
if field not in request_data:
print(f"Missing required field: {field}")
# Check message format
for i, message in enumerate(request_data["messages"]):
if "role" not in message:
print(f"Message {i} missing 'role' field")
if "content" not in message:
print(f"Message {i} missing 'content' field")
if message["role"] not in ["user", "assistant", "system"]:
print(f"Message {i} has invalid role: {message['role']}")
Request Validation
Validate Request Before Sending
def validate_request(request_data):
errors = []
# Check required fields
if "model" not in request_data:
errors.append("Missing required field: model")
if "messages" not in request_data:
errors.append("Missing required field: messages")
# Check messages format
if "messages" in request_data:
messages = request_data["messages"]
if not isinstance(messages, list):
errors.append("Messages must be a list")
else:
for i, message in enumerate(messages):
if not isinstance(message, dict):
errors.append(f"Message {i} must be a dictionary")
else:
if "role" not in message:
errors.append(f"Message {i} missing 'role' field")
if "content" not in message:
errors.append(f"Message {i} missing 'content' field")
# Check optional fields
if "max_tokens" in request_data:
if not isinstance(request_data["max_tokens"], int) or request_data["max_tokens"] <= 0:
errors.append("max_tokens must be a positive integer")
if "temperature" in request_data:
temp = request_data["temperature"]
if not isinstance(temp, (int, float)) or temp < 0 or temp > 2:
errors.append("temperature must be between 0 and 2")
return errors
# Use validation
request_data = {
"model": "gpt-4",
"messages": [{"role": "user", "content": "Hello"}]
}
errors = validate_request(request_data)
if errors:
print("Validation errors:")
for error in errors:
print(f"- {error}")
else:
print("Request is valid")
Response Debugging
Inspect Response Details
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello"}]
)
# Print response details
print(f"Status Code: {response.status_code}")
print(f"Response ID: {response.id}")
print(f"Model: {response.model}")
print(f"Created: {response.created}")
print(f"Usage: {response.usage}")
print(f"Choices: {len(response.choices)}")
# Print each choice
for i, choice in enumerate(response.choices):
print(f"Choice {i}:")
print(f" Index: {choice.index}")
print(f" Message: {choice.message.content}")
print(f" Finish Reason: {choice.finish_reason}")
Debug Streaming Responses
stream = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Write a story"}],
stream=True
)
chunk_count = 0
for chunk in stream:
chunk_count += 1
print(f"Chunk {chunk_count}:")
print(f" ID: {chunk.id}")
print(f" Object: {chunk.object}")
if chunk.choices:
for choice in chunk.choices:
if choice.delta.content:
print(f" Content: {choice.delta.content}")
if choice.delta.role:
print(f" Role: {choice.delta.role}")
if choice.finish_reason:
print(f" Finish Reason: {choice.finish_reason}")
print()
Network Issues
Check Network Connectivity
import requests
import time
def check_connectivity():
try:
response = requests.get("https://api.Jan.ai/v1/version", timeout=10)
print(f"API is reachable: {response.status_code}")
return True
except requests.exceptions.Timeout:
print("Request timed out")
return False
except requests.exceptions.ConnectionError:
print("Connection error")
return False
except Exception as e:
print(f"Unexpected error: {e}")
return False
# Test connectivity
if check_connectivity():
print("Network connectivity is good")
else:
print("Network connectivity issues detected")
Handle Network Errors
import requests
from Jan import Jan, APIError
def make_request_with_retry(client, max_retries=3):
for attempt in range(max_retries):
try:
return client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "test"}]
)
except requests.exceptions.Timeout:
if attempt < max_retries - 1:
wait_time = 2 ** attempt
print(f"Timeout, retrying in {wait_time} seconds...")
time.sleep(wait_time)
else:
raise
except requests.exceptions.ConnectionError:
if attempt < max_retries - 1:
wait_time = 2 ** attempt
print(f"Connection error, retrying in {wait_time} seconds...")
time.sleep(wait_time)
else:
raise
except APIError as e:
# Don't retry API errors
raise
Performance Debugging
Measure Request Latency
import time
from Jan import Jan
client = Jan(api_key="your-api-key")
# Measure request time
start_time = time.time()
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello"}]
)
end_time = time.time()
latency = end_time - start_time
print(f"Request latency: {latency:.2f} seconds")
print(f"Tokens generated: {response.usage.completion_tokens}")
print(f"Tokens per second: {response.usage.completion_tokens / latency:.2f}")
Monitor Token Usage
def monitor_token_usage(client, requests_count=10):
total_tokens = 0
total_cost = 0
for i in range(requests_count):
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": f"Request {i+1}"}]
)
usage = response.usage
total_tokens += usage.total_tokens
# Calculate cost (example rates)
cost = usage.total_tokens * 0.00003 # $0.03 per 1K tokens
total_cost += cost
print(f"Request {i+1}: {usage.total_tokens} tokens, ${cost:.4f}")
print(f"Total tokens: {total_tokens}")
print(f"Total cost: ${total_cost:.4f}")
print(f"Average tokens per request: {total_tokens / requests_count:.2f}")
monitor_token_usage(client)
Debugging Tools
Request Interceptor
import requests
from Jan import Jan
class DebugSession(requests.Session):
def request(self, method, url, **kwargs):
print(f"Making {method} request to {url}")
print(f"Headers: {kwargs.get('headers', {})}")
if 'json' in kwargs:
print(f"Body: {kwargs['json']}")
response = super().request(method, url, **kwargs)
print(f"Response status: {response.status_code}")
print(f"Response headers: {dict(response.headers)}")
return response
# Use debug session
session = DebugSession()
client = Jan(api_key="your-api-key", session=session)
Error Logger
import logging
from Jan import Jan, APIError
# Set up logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('debug.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
client = Jan(api_key="your-api-key")
try:
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello"}]
)
logger.info(f"Request successful: {response.id}")
except APIError as e:
logger.error(f"API Error: {e.code} - {e.message}")
except Exception as e:
logger.error(f"Unexpected error: {e}")
Best Practices
1. Always Handle Errors
from Jan import Jan, APIError
try:
response = client.chat.completions.create(...)
except APIError as e:
# Log error details
logger.error(f"API Error: {e.code} - {e.message}")
# Handle specific error types
if e.code == "rate_limit_exceeded":
# Implement backoff
pass
elif e.code == "invalid_parameter":
# Fix request parameters
pass
else:
# Handle other errors
pass
2. Validate Inputs
def validate_message(message):
if not isinstance(message, dict):
raise ValueError("Message must be a dictionary")
if "role" not in message:
raise ValueError("Message missing 'role' field")
if "content" not in message:
raise ValueError("Message missing 'content' field")
if message["role"] not in ["user", "assistant", "system"]:
raise ValueError(f"Invalid role: {message['role']}")
return True
3. Use Timeouts
import requests
from Jan import Jan
# Set reasonable timeouts
session = requests.Session()
session.timeout = 30 # 30 second timeout
# Note: requests.Session does not apply a `timeout` attribute automatically;
# to guarantee a timeout, pass timeout=30 on each request (or via the client's own timeout option).
client = Jan(api_key="your-api-key", session=session)
Troubleshooting Checklist
Before Making Requests
- API key is valid and not expired
- Request format is correct
- Required fields are present
- Parameter values are within valid ranges
- Network connectivity is working
When Requests Fail
- Check error message and code
- Verify request parameters
- Check rate limits
- Verify API key permissions
- Test with minimal request
- Check API status page
Performance Issues
- Measure request latency
- Check token usage
- Monitor rate limits
- Use appropriate models
- Optimize prompts
Next Steps
- API Introduction - Learn about the API
- Authentication - Understand authentication
- Error Handling - Handle errors properly