| API | Typical Latency | Notes |
|---|---|---|
| Writing Score | 1-3s | Depends on text length |
| AI Detection | 1-2s | Fast for short text |
| Plagiarism | 10-60s | Async, requires polling |
import { LRUCache } from 'lru-cache';
import { createHash } from 'crypto';
// Memoizes writing-score API calls keyed by a SHA-256 of the input text.
// Capacity-bounded (500 entries) with a 1-hour TTL per entry.
// NOTE(review): `token` is accepted but never used — the cache key is the
// text hash only, so cached scores are shared across tokens. Confirm scores
// are token-independent, or fold the token into the key material.
const scoreCache = new LRUCache<string, any>({ max: 500, ttl: 3600000 });

/**
 * Score `text`, returning a cached result when the same text was scored
 * within the TTL; otherwise calls the API and caches the response.
 */
async function cachedScore(text: string, token: string) {
  const key = createHash('sha256').update(text).digest('hex');
  // Use has() rather than a truthiness check on get(): a cached falsy
  // value (0, null, false, '') must still count as a hit, or the cache
  // silently re-calls the API for those results every time.
  if (scoreCache.has(key)) return scoreCache.get(key);
  const score = await grammarlyClient.score(text);
  scoreCache.set(key, score);
  return score;
}
/**
 * Runs the writing-score and AI-detection checks concurrently and returns
 * both results. The two calls share no state, so issuing them together via
 * Promise.all makes total latency the slower of the two rather than their
 * sum. Rejects if either underlying call rejects.
 */
async function fullAudit(text: string, token: string) {
  const pending = Promise.all([
    grammarlyClient.score(text),
    grammarlyClient.detectAI(text),
  ]);
  const [score, ai] = await pending;
  return { score, ai };
}
For guidance on reducing API spend (caching strategy, batching, tier selection), see the `grammarly-cost-tuning` guide.