Configure Wispr Flow API for voice-to-text transcription. Supports WebSocket (recommended, lower latency) and REST endpoints. Auth via API key (backend) or access tokens (client-side).
# .env
WISPR_API_KEY=your-api-key-here
WISPR_API_URL=https://api.wisprflow.ai
// Connect to the Wispr Flow streaming endpoint, authenticating with the
// backend API key (never expose this key in client-side code).
const ws = new WebSocket('wss://api.wisprflow.ai/api/v1/ws', {
  headers: { Authorization: `Bearer ${process.env.WISPR_API_KEY}` },
});

ws.on('open', () => {
  // Send context for better transcription accuracy.
  ws.send(JSON.stringify({
    type: 'config',
    context: { app: 'code-editor', language: 'en' },
  }));
  console.log('Connected to Wispr Flow');
});

ws.on('message', (data) => {
  // Guard the parse: JSON.parse throws on a malformed frame, which would
  // otherwise take down the process.
  let result;
  try {
    result = JSON.parse(data.toString());
  } catch (err) {
    console.error('Ignoring malformed message from Wispr Flow:', err);
    return;
  }
  if (result.type === 'transcription') {
    console.log(`Transcript: ${result.text}`);
  }
});

// Required: with the `ws` package, an unhandled 'error' event crashes Node.
ws.on('error', (err) => {
  console.error('Wispr Flow WebSocket error:', err);
});
import os

import requests

# POST the audio file to the REST transcription endpoint.
# - `with` guarantees the file handle is closed (the original leaked it).
# - `timeout` prevents the request from hanging forever on network issues.
# - `raise_for_status()` turns HTTP errors (401, 500, ...) into a clear
#   exception instead of a confusing KeyError on the JSON body.
with open("recording.wav", "rb") as audio_file:
    response = requests.post(
        f"{os.environ['WISPR_API_URL']}/api/v1/transcribe",
        headers={"Authorization": f"Bearer {os.environ['WISPR_API_KEY']}"},
        files={"audio": audio_file},
        data={"language": "en"},
        timeout=30,
    )
response.raise_for_status()
print(f"Transcript: {response.json()['text']}")
// Backend: generate a short-lived access token for client-side use.
// Clients connect to the WebSocket with this token instead of the
// long-lived API key, which must never leave the server.
const response = await fetch('https://api.wisprflow.ai/api/v1/auth/token', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${process.env.WISPR_API_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({ expires_in: 3600 }), // token lifetime: 1 hour
});

// fetch() only rejects on network failure — HTTP errors (401, 500, ...)
// resolve normally and must be checked explicitly, otherwise
// access_token would silently be undefined.
if (!response.ok) {
  throw new Error(`Token request failed: ${response.status} ${response.statusText}`);
}

const { access_token } = await response.json();
// Send access_token to the client for a direct WebSocket connection.
| Error | Cause | Solution |
|---|---|---|
| 401 Unauthorized | Invalid API key | Check key at wisprflow.ai/developers |
| WebSocket disconnect | Network interruption | Reconnect with backoff |
| Empty transcript | No speech detected | Check audio format and quality |
Proceed to wispr-hello-world for your first transcription.