Use Batch Endpoints
Request multiple resources in one call when possible.
Learn best practices for working with the Quantum Trader API, including authentication, error handling, and performance optimization.
// Axios interceptor for automatic token refreshapi.interceptors.response.use( response => response, async error => { if (error.response?.status === 401 && !error.config._retry) { error.config._retry = true;
try { const tokens = await refreshTokens(); error.config.headers['Authorization'] = `Bearer ${tokens.access_token}`; return api.request(error.config); } catch (refreshError) { // Redirect to login window.location.href = '/login'; return Promise.reject(refreshError); } } return Promise.reject(error); });| Code | Meaning | Action |
|---|---|---|
| 200 | Success | Process response |
| 201 | Created | Process created resource |
| 204 | No Content | Deletion successful |
| 400 | Bad Request | Fix request parameters |
| 401 | Unauthorized | Refresh token or re-login |
| 403 | Forbidden | Check user permissions |
| 404 | Not Found | Handle missing resource |
| 422 | Validation Error | Fix input data |
| 429 | Rate Limited | Wait and retry |
| 500 | Server Error | Retry with backoff |
/**
 * Error thrown for non-2xx API responses; carries the HTTP status and the
 * server-provided detail so callers can branch on them.
 */
class APIError extends Error {
  /**
   * @param {{status: number}} response - The failed HTTP response.
   * @param {{detail?: string}} data - Parsed JSON error body from the API.
   */
  constructor(response, data) {
    super(data.detail || 'An error occurred');
    // Fix: Error subclasses should set `name` so logs and error reports
    // show "APIError" instead of the generic "Error".
    this.name = 'APIError';
    this.status = response.status;
    this.detail = data.detail;
  }
}
async function apiRequest(method, endpoint, data = null) { const config = { method, url: endpoint, headers: { 'Authorization': `Bearer ${getToken()}` } };
if (data) { config.data = data; config.headers['Content-Type'] = 'application/json'; }
try { const response = await api.request(config); return response.data; } catch (error) { if (error.response) { const { status, data } = error.response;
switch (status) { case 400: throw new APIError(error.response, data); case 401: // Try refresh, then retry await refreshTokens(); return apiRequest(method, endpoint, data); case 403: throw new Error('Permission denied'); case 404: throw new Error('Resource not found'); case 429: // Retry after delay await sleep(60000); return apiRequest(method, endpoint, data); case 500: // Retry with exponential backoff throw new Error('Server error - try again later'); default: throw new APIError(error.response, data); } } throw error; }}import requestsimport timefrom typing import Optional, Any
class APIError(Exception):
    """Raised for non-2xx API responses; carries the status code and detail."""

    def __init__(self, status: int, detail: str):
        # Message renders as "[<status>] <detail>" for logs and tracebacks.
        super().__init__(f"[{status}] {detail}")
        self.status = status
        self.detail = detail
class APIClient: def __init__(self, base_url: str): self.base_url = base_url self.access_token: Optional[str] = None self.refresh_token: Optional[str] = None
def _headers(self) -> dict: headers = {"Content-Type": "application/json"} if self.access_token: headers["Authorization"] = f"Bearer {self.access_token}" return headers
def request( self, method: str, endpoint: str, data: Any = None, retry_count: int = 0 ) -> dict: """Make API request with error handling.""" try: response = requests.request( method, f"{self.base_url}{endpoint}", headers=self._headers(), json=data )
if response.status_code == 204: return {}
if response.status_code >= 400: error_data = response.json()
if response.status_code == 401 and retry_count == 0: self._refresh() return self.request(method, endpoint, data, retry_count + 1)
if response.status_code == 429: time.sleep(60) return self.request(method, endpoint, data, retry_count + 1)
raise APIError(response.status_code, error_data.get("detail", "Unknown error"))
return response.json()
except requests.exceptions.ConnectionError: if retry_count < 3: time.sleep(2 ** retry_count) return self.request(method, endpoint, data, retry_count + 1) raise
def get(self, endpoint: str, params: dict = None) -> dict: return self.request("GET", endpoint)
def post(self, endpoint: str, data: dict = None) -> dict: return self.request("POST", endpoint, data)| Provider | Rate Limit | Daily Limit | History |
|---|---|---|---|
| Massive (Polygon) | 5 calls/min | Unlimited | 2 years |
| Alpha Vantage | 5 calls/min | 25 calls | 20+ years |
| yfinance | Variable | - | 20+ years |
async function fetchWithRetry(fn, maxRetries = 3) { for (let attempt = 0; attempt < maxRetries; attempt++) { try { return await fn(); } catch (error) { if (error.response?.status === 429) { // Rate limited - wait longer const delay = Math.pow(2, attempt) * 10000; // 10s, 20s, 40s console.log(`Rate limited. Waiting ${delay/1000}s...`); await sleep(delay); } else if (error.response?.status >= 500) { // Server error - quick retry const delay = Math.pow(2, attempt) * 1000; // 1s, 2s, 4s await sleep(delay); } else { throw error; // Don't retry other errors } } } throw new Error('Max retries exceeded');}Use Batch Endpoints
Request multiple resources in one call when possible.
Specify Date Ranges
Always include start_date and end_date to limit data volume.
Cache Responses
OHLCV data is immutable - cache aggressively.
Use Appropriate Limits
Request only what you need with limit parameter.
// ❌ Bad: Multiple individual requestsconst aapl = await api.get('/stocks/AAPL/ohlcv');const msft = await api.get('/stocks/MSFT/ohlcv');const googl = await api.get('/stocks/GOOGL/ohlcv');
// ✅ Good: Batch requestconst { data } = await api.get('/stocks/batch', { params: { symbols: 'AAPL,MSFT,GOOGL' }});
// ❌ Bad: Fetching all dataconst { data } = await api.get('/stocks/AAPL/ohlcv');
// ✅ Good: Specify date range and limit
const { data } = await api.get('/stocks/AAPL/ohlcv', {
  params: { start_date: '2024-01-01', end_date: '2024-12-31', limit: 365 },
});

// Simple in-memory cache with TTL
class Cache {
  constructor(ttlMs = 15 * 60 * 1000) { // default TTL: 15 minutes
    this.cache = new Map();
    this.ttl = ttlMs;
  }

  // Return the cached value, or null when absent or expired.
  get(key) {
    const entry = this.cache.get(key);
    if (!entry) return null;
    if (Date.now() > entry.expiry) {
      // Lazily evict expired entries on read.
      this.cache.delete(key);
      return null;
    }
    return entry.value;
  }

  // Store a value with an absolute expiry timestamp.
  set(key, value) {
    this.cache.set(key, { value, expiry: Date.now() + this.ttl });
  }
}
const cache = new Cache();
async function getIndicators(symbol, params) { const cacheKey = `indicators:${symbol}:${JSON.stringify(params)}`; const cached = cache.get(cacheKey);
if (cached) { console.log('Cache hit'); return cached; }
const { data } = await api.get(`/indicators/${symbol}`, { params }); cache.set(cacheKey, data); return data;}Handle paginated responses properly:
// Fetch every page from a paginated endpoint and return the combined items.
async function fetchAllPages(endpoint, params = {}) {
  const limit = 100;
  const allItems = [];

  for (let offset = 0; ; offset += limit) {
    const { data } = await api.get(endpoint, {
      params: { ...params, limit, offset },
    });

    // Endpoints return either { items, total } or a bare array.
    const items = data.items || data;
    const total = data.total;

    allItems.push(...items);

    // Stop on a short page, or once the reported total has been collected.
    if (items.length < limit || (total && allItems.length >= total)) {
      break;
    }
  }

  return allItems;
}
// Usageconst allStrategies = await fetchAllPages('/strategies');For real-time updates, poll efficiently:
// Polls an endpoint on a fixed interval and fans results out to subscribers.
class Poller {
  constructor(endpoint, interval = 5000) {
    this.endpoint = endpoint;
    this.interval = interval;
    this.running = false;
    this.callbacks = [];
  }

  // Register a callback; the poll loop starts lazily on the first subscriber.
  subscribe(callback) {
    this.callbacks.push(callback);
    if (!this.running) this.start();
  }

  // Remove a callback; polling stops once no subscribers remain.
  unsubscribe(callback) {
    this.callbacks = this.callbacks.filter((cb) => cb !== callback);
    if (this.callbacks.length === 0) this.stop();
  }

  // Poll loop: errors are logged and polling continues on the next tick.
  async start() {
    this.running = true;
    while (this.running) {
      try {
        const { data } = await api.get(this.endpoint);
        for (const cb of this.callbacks) cb(data);
      } catch (error) {
        console.error('Poll error:', error);
      }
      await sleep(this.interval);
    }
  }

  stop() {
    this.running = false;
  }
}
// Usage: Poll backtest progressconst progressPoller = new Poller(`/backtests/${id}`, 2000);progressPoller.subscribe(data => { console.log(`Progress: ${data.progress}%`); if (data.status !== 'RUNNING') { progressPoller.stop(); }});// Use environment variablesconst API_URL = process.env.API_URL || 'http://localhost:8501/api/v1';
// Mock API for testingconst mockApi = { get: jest.fn(), post: jest.fn(),};
// Test with fixturesconst mockStrategy = { id: '550e8400-e29b-41d4-a716-446655440000', name: 'Test Strategy', strategy_type: 'rules', is_active: false};
mockApi.get.mockResolvedValue({ data: [mockStrategy] });Security First
Secure token storage, HTTPS, proper error handling.
Efficiency
Batch requests, caching, appropriate limits.
Resilience
Retry logic, exponential backoff, graceful degradation.
Monitoring
Log errors, track metrics, alert on failures.