SynapseX Python SDK
The official Python SDK for integrating SynapseX AI capabilities into your applications.
Installation
pip install synapsex-sdk
Requirements:
- Python 3.9+
- Dependencies installed automatically:
requests, pydantic, httpx
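To confirm the install, you can query the installed distribution from Python; the distribution name below matches the pip install command above.
from importlib.metadata import version
print(version("synapsex-sdk"))  # prints the installed SDK version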
Quick Start
from synapsex import SynapseX
# Initialize client
client = SynapseX(
api_key="sk-synapsex-your-key",
license_key="SQXL-XXXX-XXXX-XXXX" # Optional for licensed features
)
# Simple chat
response = client.chat("Hello, SynapseX!")
print(response.content)
Client Initialization
Basic Setup
from synapsex import SynapseX
client = SynapseX(
api_key="sk-synapsex-your-key",
license_key="SQXL-XXXX-XXXX-XXXX"
)
With Custom Configuration
client = SynapseX(
api_key="sk-synapsex-your-key",
license_key="SQXL-XXXX-XXXX-XXXX",
api_url="https://api.synapsex.ai", # Custom API URL
timeout=30, # Request timeout (seconds)
max_retries=3, # Retry failed requests
)
From Environment Variables
import os
from synapsex import SynapseX
# Set environment variables
os.environ["SYNAPSEX_API_KEY"] = "sk-synapsex-your-key"
os.environ["SYNAPSEX_LICENSE_KEY"] = "SQXL-XXXX-XXXX-XXXX"
# Initialize from environment
client = SynapseX.from_env()
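If you keep credentials in a local .env file, you can load them before calling from_env(). The python-dotenv package used here is an assumption, not an SDK dependency.
# Assumes python-dotenv is installed separately: pip install python-dotenv
from dotenv import load_dotenv
from synapsex import SynapseX

load_dotenv()  # reads SYNAPSEX_API_KEY and SYNAPSEX_LICENSE_KEY from .env
client = SynapseX.from_env()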
Async Client
from synapsex import AsyncSynapseX
async_client = AsyncSynapseX(
api_key="sk-synapsex-your-key",
license_key="SQXL-XXXX-XXXX-XXXX"
)
import asyncio

# Usage
async def main():
    response = await async_client.chat("Hello!")
    print(response.content)

asyncio.run(main())
Chat Completions
Simple Chat
response = client.chat("What is quantum computing?")
print(response.content)
With Options
response = client.chat(
message="Explain machine learning",
model="synapsex-14b",
temperature=0.7,
max_tokens=1024,
system_prompt="You are a helpful AI assistant."
)
print(response.content)
print(f"Tokens used: {response.usage.total_tokens}")
Streaming Responses
# Real-time streaming
for chunk in client.chat_stream("Write a story about AI"):
    print(chunk.content, end="", flush=True)
print()  # New line at end
Multi-turn Conversations
from synapsex.types import Message
messages = [
Message(role="system", content="You are a helpful assistant."),
Message(role="user", content="What is Python?"),
Message(role="assistant", content="Python is a programming language..."),
Message(role="user", content="Show me an example"),
]
response = client.chat_messages(messages)
print(response.content)
With Quantum Reranking
response = client.chat(
message="Complex question requiring deep analysis",
rerank_mode="quantum_gpu" # Options: none, cpu, quantum_gpu
)
print(response.content)
Training Jobs
Start a Training Job
from synapsex.types import TrainingConfig
job = client.training.create(
training_type="sft",
base_model="synapsex-14b",
dataset_id="my-dataset-123",
config=TrainingConfig(
epochs=3,
batch_size=4,
learning_rate=2e-5,
lora_rank=16
)
)
print(f"Training started: {job.id}")
print(f"Status: {job.status}")
Monitor Training
# Get job status
job = client.training.get("job-123")
print(f"Status: {job.status}")
print(f"Progress: {job.progress}%")
print(f"Current epoch: {job.current_epoch}/{job.total_epochs}")
# List all jobs
jobs = client.training.list(status="running")
for job in jobs:
    print(f"{job.id}: {job.status}")
Training with Callbacks
def on_progress(job):
    print(f"Progress: {job.progress}% | Loss: {job.metrics.loss:.4f}")
job = client.training.create(
training_type="sft",
base_model="synapsex-14b",
dataset_id="my-dataset",
on_progress=on_progress,
wait=True # Block until complete
)
Datasets
Upload a Dataset
# From file
dataset = client.datasets.upload(
file_path="./training_data.jsonl",
name="my-training-data",
description="Customer support conversations"
)
print(f"Dataset uploaded: {dataset.id}")
# From data
data = [
{"messages": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]},
{"messages": [{"role": "user", "content": "Bye"}, {"role": "assistant", "content": "Goodbye!"}]},
]
dataset = client.datasets.create(data=data, name="inline-dataset")
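For file-based uploads, each line of the .jsonl file holds one record. Assuming the same messages schema as the inline example above, a minimal sketch for producing the file with the standard library:
import json

records = [
    {"messages": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]},
    {"messages": [{"role": "user", "content": "Bye"}, {"role": "assistant", "content": "Goodbye!"}]},
]

# One JSON object per line, ready for client.datasets.upload(file_path=...)
with open("training_data.jsonl", "w", encoding="utf-8") as f:
    for record in records:
        f.write(json.dumps(record) + "\n")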
List Datasets
datasets = client.datasets.list()
for ds in datasets:
    print(f"{ds.name}: {ds.record_count} records")
Models
List Available Models
models = client.models.list()
for model in models:
    print(f"{model.id}: {model.description}")
    print(f"  Context: {model.context_length} tokens")
    print(f"  Pricing: ${model.pricing.per_1k_tokens}/1K tokens")
Get Model Details
model = client.models.get("synapsex-14b")
print(f"Name: {model.name}")
print(f"Parameters: {model.parameters}")
print(f"Capabilities: {model.capabilities}")
License Management
Check License Status
license_info = client.license.info()
print(f"Plan: {license_info.plan}")
print(f"Status: {license_info.status}")
print(f"Expires: {license_info.expires_at}")
print(f"Features: {license_info.features}")
Validate License
validation = client.license.validate()
if validation.valid:
    print("License is valid")
    print(f"Remaining quota: {validation.quota_remaining}")
else:
    print(f"License invalid: {validation.reason}")
Feedback
Submit Feedback
client.feedback.submit(
request_id="req-123",
rating=5,
comment="Great response!",
categories=["accurate", "helpful"]
)
List Feedback
feedback = client.feedback.list(limit=10)
for item in feedback:
    print(f"Rating: {item.rating} - {item.comment}")
Error Handling
from synapsex.exceptions import (
SynapseXError,
AuthenticationError,
RateLimitError,
QuotaExceededError,
ValidationError
)
try:
    response = client.chat("Hello")
except AuthenticationError as e:
    print(f"Invalid API key: {e}")
except RateLimitError as e:
    print(f"Rate limited. Retry after: {e.retry_after}s")
except QuotaExceededError as e:
    print(f"Quota exceeded: {e.quota_type}")
except ValidationError as e:
    print(f"Invalid request: {e.details}")
except SynapseXError as e:
    print(f"API error: {e}")
Type Reference
Response Objects
# ChatResponse
response.content # str: The response text
response.model # str: Model used
response.usage # Usage object
response.finish_reason # str: "stop", "length", etc.
# Usage
response.usage.prompt_tokens # int
response.usage.completion_tokens # int
response.usage.total_tokens # int
# TrainingJob
job.id # str: Job ID
job.status # str: "pending", "running", "completed", "failed"
job.progress # float: 0-100
job.current_epoch # int
job.total_epochs # int
job.metrics # Metrics object
Best Practices
Connection Reuse
# ✅ Good: Reuse client
client = SynapseX.from_env()
for question in questions:
    response = client.chat(question)
    print(response.content)

# ❌ Bad: Create new client each time
for question in questions:
    client = SynapseX.from_env()  # Wasteful!
    response = client.chat(question)
Async for High Throughput
import asyncio
from synapsex import AsyncSynapseX
async def process_batch(questions):
    client = AsyncSynapseX.from_env()
    tasks = [client.chat(q) for q in questions]
    responses = await asyncio.gather(*tasks)
    return [r.content for r in responses]
# Process 100 questions concurrently
results = asyncio.run(process_batch(questions))
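Launching hundreds of requests at once can trip rate limits; one option is to cap concurrency with an asyncio.Semaphore. The sketch below assumes the same imports as above, and the limit of 10 is arbitrary.
import asyncio

from synapsex import AsyncSynapseX

async def process_batch_limited(questions, max_concurrency=10):
    client = AsyncSynapseX.from_env()
    semaphore = asyncio.Semaphore(max_concurrency)

    async def ask(question):
        # At most max_concurrency chat calls are in flight at any time
        async with semaphore:
            response = await client.chat(question)
            return response.content

    return await asyncio.gather(*(ask(q) for q in questions))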
Graceful Degradation
import time

def chat_with_fallback(message):
    try:
        # Try quantum reranking first
        return client.chat(message, rerank_mode="quantum_gpu")
    except QuotaExceededError:
        # Fall back to CPU reranking
        return client.chat(message, rerank_mode="cpu")
    except RateLimitError as e:
        # Wait, then retry without reranking
        time.sleep(e.retry_after)
        return client.chat(message, rerank_mode="none")
Complete Example
"""
Full example: Customer support bot with training
"""
from synapsex import SynapseX
from synapsex.types import Message, TrainingConfig
# Initialize
client = SynapseX.from_env()
# Check license
license_info = client.license.info()
print(f"Plan: {license_info.plan}")
# Upload training data
dataset = client.datasets.upload(
file_path="./support_conversations.jsonl",
name="support-data-v1"
)
# Start fine-tuning
job = client.training.create(
training_type="lora",
base_model="synapsex-14b",
dataset_id=dataset.id,
config=TrainingConfig(
epochs=3,
batch_size=4,
lora_rank=32
)
)
# Wait for completion
job = client.training.wait(job.id)
print(f"Training complete! Model: {job.output_model}")
# Use fine-tuned model
response = client.chat(
message="How do I reset my password?",
model=job.output_model
)
print(response.content)
# Submit feedback
client.feedback.submit(
request_id=response.request_id,
rating=5,
comment="Perfect answer"
)
See Also
- API Reference - REST API documentation
- CLI Reference - Command-line interface
- Training Guide - Fine-tuning models
- Quantum Reranking - Advanced features