Merge pull request #119 from minhphuc429/feature/count-tokens-endpoint

feat: implement /v1/messages/count_tokens endpoint
This commit is contained in:
Badri Narayanan S
2026-01-14 18:04:09 +05:30
committed by GitHub
6 changed files with 861 additions and 16 deletions

53
package-lock.json generated
View File

@@ -9,6 +9,8 @@
"version": "1.2.6",
"license": "MIT",
"dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4",
"@lenml/tokenizer-gemini": "^3.7.2",
"async-mutex": "^0.5.0",
"better-sqlite3": "^12.5.0",
"cors": "^2.8.5",
@@ -42,6 +44,16 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@anthropic-ai/tokenizer": {
"version": "0.0.4",
"resolved": "https://registry.npmjs.org/@anthropic-ai/tokenizer/-/tokenizer-0.0.4.tgz",
"integrity": "sha512-EHRKbxlxlc8W4KCBEseByJ7YwyYCmgu9OyN59H9+IYIGPoKv8tXyQXinkeGDI+cI8Tiuz9wk2jZb/kK7AyvL7g==",
"license": "Apache-2.0",
"dependencies": {
"@types/node": "^18.11.18",
"tiktoken": "^1.0.10"
}
},
"node_modules/@babel/runtime": {
"version": "7.28.4",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz",
@@ -91,6 +103,21 @@
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@lenml/tokenizer-gemini": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/@lenml/tokenizer-gemini/-/tokenizer-gemini-3.7.2.tgz",
"integrity": "sha512-sdSfXqjGSZWRHtf4toMcjzpBm/tOPPAtUQ5arTx4neQ2nzHUtJQJyHkoiB9KRyEfvVjW6WtQU+WbvU9glsFT2g==",
"license": "Apache-2.0",
"dependencies": {
"@lenml/tokenizers": "^3.7.2"
}
},
"node_modules/@lenml/tokenizers": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/@lenml/tokenizers/-/tokenizers-3.7.2.tgz",
"integrity": "sha512-tuap9T7Q80Czor8NHzxjlLNvxEX8MgFINzsBTV+lq1v7G+78YR3ZvBhmLsPHtgqExB4Q4kCJH6dhXOYWSLdHLw==",
"license": "Apache-2.0"
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@@ -142,6 +169,15 @@
"tailwindcss": ">=3.0.0 || >= 3.0.0-alpha.1 || >= 4.0.0-alpha.20 || >= 4.0.0-beta.1"
}
},
"node_modules/@types/node": {
"version": "18.19.130",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
"integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
"license": "MIT",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
@@ -395,7 +431,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.9.0",
"caniuse-lite": "^1.0.30001759",
@@ -1416,7 +1451,6 @@
"integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"jiti": "bin/jiti.js"
}
@@ -1793,7 +1827,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"nanoid": "^3.3.11",
"picocolors": "^1.1.1",
@@ -2611,7 +2644,6 @@
"integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@alloc/quick-lru": "^5.2.0",
"arg": "^5.0.2",
@@ -2695,6 +2727,12 @@
"node": ">=0.8"
}
},
"node_modules/tiktoken": {
"version": "1.0.22",
"resolved": "https://registry.npmjs.org/tiktoken/-/tiktoken-1.0.22.tgz",
"integrity": "sha512-PKvy1rVF1RibfF3JlXBSP0Jrcw2uq3yXdgcEXtKTYn3QJ/cBRBHDnrJ5jHky+MENZ6DIPwNUGWpkVx+7joCpNA==",
"license": "MIT"
},
"node_modules/tinyglobby": {
"version": "0.2.15",
"resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
@@ -2736,7 +2774,6 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -2814,6 +2851,12 @@
"node": ">= 0.6"
}
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",

View File

@@ -34,7 +34,8 @@
"test:crossmodel": "node tests/test-cross-model-thinking.cjs",
"test:oauth": "node tests/test-oauth-no-browser.cjs",
"test:emptyretry": "node tests/test-empty-response-retry.cjs",
"test:sanitizer": "node tests/test-schema-sanitizer.cjs"
"test:sanitizer": "node tests/test-schema-sanitizer.cjs",
"test:counttokens": "node tests/test-count-tokens.cjs"
},
"keywords": [
"claude",
@@ -57,6 +58,8 @@
"node": ">=18.0.0"
},
"dependencies": {
"@anthropic-ai/tokenizer": "^0.0.4",
"@lenml/tokenizer-gemini": "^3.7.2",
"async-mutex": "^0.5.0",
"better-sqlite3": "^12.5.0",
"cors": "^2.8.5",

View File

@@ -0,0 +1,302 @@
/**
* Token Counter Implementation for antigravity-claude-proxy
*
* Implements Anthropic's /v1/messages/count_tokens endpoint
* Uses official tokenizers for each model family:
* - Claude: @anthropic-ai/tokenizer
* - Gemini: @lenml/tokenizer-gemini
*
* @see https://platform.claude.com/docs/en/api/messages-count-tokens
*/
import { countTokens as claudeCountTokens } from '@anthropic-ai/tokenizer';
import { fromPreTrained as loadGeminiTokenizer } from '@lenml/tokenizer-gemini';
import { logger } from '../utils/logger.js';
import { getModelFamily } from '../constants.js';
// Gemini tokenizer is heavy (~138MB), so it is loaded lazily exactly once.
let geminiTokenizer = null;
let geminiTokenizerLoading = null;
/**
 * Return the shared Gemini tokenizer, loading it on first use.
 *
 * Concurrent callers share a single in-flight load promise so the tokenizer
 * is never loaded more than once at a time; a failed load clears the
 * in-flight marker so a later call can retry.
 *
 * @returns {Promise<Object>} Gemini tokenizer instance
 */
async function getGeminiTokenizer() {
  if (geminiTokenizer !== null) {
    return geminiTokenizer;
  }
  // Another caller is already loading — piggyback on its promise.
  if (geminiTokenizerLoading !== null) {
    return geminiTokenizerLoading;
  }
  const load = async () => {
    try {
      logger.debug('[TokenCounter] Loading Gemini tokenizer...');
      geminiTokenizer = await loadGeminiTokenizer();
      logger.debug('[TokenCounter] Gemini tokenizer loaded successfully');
      return geminiTokenizer;
    } catch (error) {
      logger.warn(`[TokenCounter] Failed to load Gemini tokenizer: ${error.message}`);
      throw error;
    } finally {
      // Always clear the in-flight marker so failures can be retried.
      geminiTokenizerLoading = null;
    }
  };
  geminiTokenizerLoading = load();
  return geminiTokenizerLoading;
}
/**
 * Count tokens in a piece of text using the official Claude tokenizer.
 * Falls back to a rough chars/4 heuristic if the tokenizer throws.
 *
 * @param {string} text - Text to tokenize
 * @returns {number} Token count (0 for empty/falsy input)
 */
function countClaudeTokens(text) {
  if (!text) {
    return 0;
  }
  try {
    return claudeCountTokens(text);
  } catch (error) {
    logger.debug(`[TokenCounter] Claude tokenizer error: ${error.message}`);
    // Heuristic fallback: roughly four characters per token.
    return Math.ceil(text.length / 4);
  }
}
/**
 * Count tokens in a piece of text using the Gemini tokenizer.
 * Falls back to a rough chars/4 heuristic if the tokenizer throws.
 *
 * @param {Object} tokenizer - Gemini tokenizer instance
 * @param {string} text - Text to tokenize
 * @returns {number} Token count (0 for empty/falsy input)
 */
function countGeminiTokens(tokenizer, text) {
  if (!text) {
    return 0;
  }
  try {
    const ids = tokenizer.encode(text);
    // The tokenizer may prepend a BOS marker (token id 2); do not bill it.
    const hasLeadingBos = ids[0] === 2;
    return hasLeadingBos ? ids.length - 1 : ids.length;
  } catch (error) {
    logger.debug(`[TokenCounter] Gemini tokenizer error: ${error.message}`);
    // Heuristic fallback: roughly four characters per token.
    return Math.ceil(text.length / 4);
  }
}
/**
 * Estimate the token count of a text string with the tokenizer that matches
 * the model family.
 *
 * @param {string} text - Text to tokenize
 * @param {string} model - Model name, used to pick the tokenizer
 * @param {Object} geminiTok - Gemini tokenizer instance (optional)
 * @returns {number} Token count
 */
function estimateTextTokens(text, model, geminiTok = null) {
  if (!text) {
    return 0;
  }
  switch (getModelFamily(model)) {
    case 'claude':
      return countClaudeTokens(text);
    case 'gemini':
      if (geminiTok) {
        return countGeminiTokens(geminiTok, text);
      }
      break;
    default:
      break;
  }
  // Unknown family, or Gemini tokenizer unavailable: rough chars/4 estimate.
  return Math.ceil(text.length / 4);
}
/**
 * Extract text from message content.
 *
 * Note: This function only extracts text from 'text' type blocks.
 * Image blocks (type: 'image') and document blocks (type: 'document') are not tokenized
 * and will not contribute to the token count. This is intentional as binary content
 * requires different handling and Anthropic's actual token counting for images uses
 * a fixed estimate (~1600 tokens per image) that depends on image dimensions.
 *
 * @param {string|Array} content - Message content
 * @returns {string} Concatenated text ('' for anything non-string/non-array)
 */
function extractText(content) {
  if (typeof content === 'string') {
    return content;
  }
  if (Array.isArray(content)) {
    return content
      // Guard against null/undefined elements: `block.type` would throw.
      .filter(block => block && block.type === 'text')
      // Guard against text blocks missing `text`: joining `undefined`
      // would inject the literal string "undefined" into the output.
      .map(block => block.text ?? '')
      .join('\n');
  }
  return '';
}
/**
 * Locally estimate the token footprint of a full Anthropic-style request:
 * system prompt + messages (text, tool_use, tool_result, thinking blocks)
 * + tool definitions. A flat ~4-token overhead is added per message for
 * role/structure framing.
 *
 * @param {Object} request - Anthropic format request
 * @param {Object} geminiTok - Gemini tokenizer instance (optional)
 * @returns {number} Token count
 */
function countTokensLocally(request, geminiTok = null) {
  const { messages = [], system, tools, model } = request;
  const count = text => estimateTextTokens(text, model, geminiTok);
  let total = 0;

  // System prompt: plain string, or an array of text blocks.
  if (system) {
    if (typeof system === 'string') {
      total += count(system);
    } else if (Array.isArray(system)) {
      for (const block of system) {
        if (block.type === 'text') {
          total += count(block.text);
        }
      }
    }
  }

  for (const message of messages) {
    // Per-message overhead for role and structure (~4 tokens).
    total += 4;
    total += count(extractText(message.content));
    if (!Array.isArray(message.content)) {
      continue;
    }
    // Structured blocks that extractText skips: tool calls, tool results,
    // and extended-thinking content.
    for (const block of message.content) {
      switch (block.type) {
        case 'tool_use':
          total += count(block.name);
          total += count(JSON.stringify(block.input));
          break;
        case 'tool_result':
          if (typeof block.content === 'string') {
            total += count(block.content);
          } else if (Array.isArray(block.content)) {
            total += count(extractText(block.content));
          }
          break;
        case 'thinking':
          total += count(block.thinking);
          break;
        default:
          break;
      }
    }
  }

  // Tool definitions: name + description + JSON-serialized input schema.
  if (tools && tools.length > 0) {
    for (const tool of tools) {
      total += count(tool.name);
      total += count(tool.description || '');
      total += count(JSON.stringify(tool.input_schema || {}));
    }
  }
  return total;
}
/**
 * Count tokens in a message request.
 * Implements Anthropic's /v1/messages/count_tokens endpoint.
 * Uses local tokenization for all content types.
 *
 * @param {Object} anthropicRequest - Anthropic format request with messages, model, system, tools
 * @param {Object} accountManager - Account manager instance (unused, kept for API compatibility)
 * @param {Object} options - Options (unused, kept for API compatibility)
 * @returns {Promise<Object>} Response with input_tokens count
 */
export async function countTokens(anthropicRequest, accountManager = null, options = {}) {
  try {
    const family = getModelFamily(anthropicRequest.model);
    let geminiTok = null;
    // Load the (heavy) Gemini tokenizer only when the model needs it.
    if (family === 'gemini') {
      try {
        geminiTok = await getGeminiTokenizer();
      } catch (error) {
        // Counting still works: estimateTextTokens falls back to chars/4.
        logger.warn(`[TokenCounter] Gemini tokenizer unavailable, using fallback`);
      }
    }
    const inputTokens = countTokensLocally(anthropicRequest, geminiTok);
    logger.debug(`[TokenCounter] Local count (${family}): ${inputTokens} tokens`);
    return {
      input_tokens: inputTokens
    };
  } catch (error) {
    logger.warn(`[TokenCounter] Error: ${error.message}, using character-based fallback`);
    // Ultimate fallback: character-based estimation
    const { messages = [], system } = anthropicRequest;
    let charCount = 0;
    if (system) {
      charCount += typeof system === 'string' ? system.length : JSON.stringify(system).length;
    }
    for (const message of messages) {
      // JSON.stringify(undefined) returns undefined (no .length), so a
      // message with missing content must not crash the fallback itself.
      charCount += JSON.stringify(message.content ?? '').length;
    }
    return {
      input_tokens: Math.ceil(charCount / 4)
    };
  }
}
/**
 * Express route handler factory for /v1/messages/count_tokens.
 *
 * @param {Object} accountManager - Account manager instance
 * @returns {Function} Express middleware
 */
export function createCountTokensHandler(accountManager) {
  return async (req, res) => {
    // Small helper to emit Anthropic-shaped error responses.
    const sendError = (status, type, message) =>
      res.status(status).json({ type: 'error', error: { type, message } });
    try {
      const { messages, model, system, tools, tool_choice, thinking } = req.body;
      // Validate required fields before doing any counting work.
      if (!Array.isArray(messages)) {
        return sendError(400, 'invalid_request_error', 'messages is required and must be an array');
      }
      if (!model) {
        return sendError(400, 'invalid_request_error', 'model is required');
      }
      const result = await countTokens(
        { messages, model, system, tools, tool_choice, thinking },
        accountManager
      );
      res.json(result);
    } catch (error) {
      logger.error(`[TokenCounter] Handler error: ${error.message}`);
      sendError(500, 'api_error', error.message);
    }
  };
}

View File

@@ -9,6 +9,7 @@ import cors from 'cors';
import path from 'path';
import { fileURLToPath } from 'url';
import { sendMessage, sendMessageStream, listModels, getModelQuotas, getSubscriptionTier } from './cloudcode/index.js';
import { createCountTokensHandler } from './cloudcode/count-tokens.js';
import { mountWebUI } from './webui/index.js';
import { config } from './config.js';
@@ -597,16 +598,19 @@ app.get('/v1/models', async (req, res) => {
});
/**
* Count tokens endpoint (not supported)
* Count tokens endpoint - Anthropic Messages API compatible
* Uses local tokenization with official tokenizers (@anthropic-ai/tokenizer for Claude, @lenml/tokenizer-gemini for Gemini)
*/
app.post('/v1/messages/count_tokens', (req, res) => {
res.status(501).json({
type: 'error',
error: {
type: 'not_implemented',
message: 'Token counting is not implemented. Use /v1/messages with max_tokens or configure your client to skip token counting.'
}
});
app.post('/v1/messages/count_tokens', async (req, res) => {
  try {
    // Best-effort initialization only: countTokens ignores accountManager
    // (counting is purely local via bundled tokenizers), so this is kept
    // for consistency with the other routes — TODO confirm it is needed.
    await ensureInitialized();
  } catch (error) {
    // Non-fatal: local token counting works without an initialized
    // account manager.
    logger.debug(`[TokenCounter] Account manager not initialized: ${error.message}`);
  }
  return createCountTokensHandler(accountManager)(req, res);
});
/**

View File

@@ -18,7 +18,8 @@ const tests = [
{ name: 'Cross-Model Thinking', file: 'test-cross-model-thinking.cjs' },
{ name: 'OAuth No-Browser Mode', file: 'test-oauth-no-browser.cjs' },
{ name: 'Empty Response Retry', file: 'test-empty-response-retry.cjs' },
{ name: 'Schema Sanitizer', file: 'test-schema-sanitizer.cjs' }
{ name: 'Schema Sanitizer', file: 'test-schema-sanitizer.cjs' },
{ name: 'Count Tokens', file: 'test-count-tokens.cjs' }
];
async function runTest(test) {

492
tests/test-count-tokens.cjs Normal file
View File

@@ -0,0 +1,492 @@
/**
* Test Count Tokens - Tests for the /v1/messages/count_tokens endpoint
*
* Verifies token counting functionality:
* - Local estimation using official tokenizers (@anthropic-ai/tokenizer for Claude, @lenml/tokenizer-gemini for Gemini)
* - Request validation
* - Different content types (text, tools, system prompts)
*/
const http = require('http');
// Server configuration
const BASE_URL = 'localhost';
const PORT = 8080;
/**
 * POST a JSON body to /v1/messages/count_tokens and resolve with the parsed
 * JSON response, augmented with the HTTP statusCode.
 * @param {Object} body - Request body
 * @returns {Promise<Object>} - Parsed JSON response with statusCode
 */
function countTokensRequest(body) {
  const payload = JSON.stringify(body);
  const options = {
    host: BASE_URL,
    port: PORT,
    path: '/v1/messages/count_tokens',
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': 'test',
      'anthropic-version': '2023-06-01',
      'Content-Length': Buffer.byteLength(payload)
    }
  };
  return new Promise((resolve, reject) => {
    const req = http.request(options, res => {
      const chunks = [];
      res.on('data', chunk => chunks.push(chunk));
      res.on('end', () => {
        const raw = Buffer.concat(chunks).toString();
        try {
          resolve({ ...JSON.parse(raw), statusCode: res.statusCode });
        } catch (e) {
          reject(new Error(`Parse error: ${e.message}\nRaw: ${raw.substring(0, 500)}`));
        }
      });
    });
    req.on('error', reject);
    req.write(payload);
    req.end();
  });
}
/**
 * Run the count_tokens endpoint test suite against a locally running proxy.
 * Exits with code 1 if any test fails.
 */
async function runTests() {
  console.log('╔══════════════════════════════════════════════════════════════╗');
  console.log('║              COUNT TOKENS ENDPOINT TEST SUITE                ║');
  console.log('╚══════════════════════════════════════════════════════════════╝\n');
  let passed = 0;
  let failed = 0;
  // Runs one test case; fix: prefix pass/fail markers so the two outcomes
  // are distinguishable in the output (previously both logged only the name).
  function test(name, fn) {
    return fn()
      .then(() => {
        console.log(`✓ ${name}`);
        passed++;
      })
      .catch(e => {
        console.log(`✗ ${name}`);
        console.log(`  Error: ${e.message}`);
        failed++;
      });
  }
  function assert(condition, message) {
    if (!condition) throw new Error(message);
  }
  function assertType(value, type, name) {
    if (typeof value !== type) {
      throw new Error(`${name} should be ${type}, got ${typeof value}`);
    }
  }
  function assertGreater(value, min, name) {
    if (value <= min) {
      throw new Error(`${name} should be greater than ${min}, got ${value}`);
    }
  }
  // Test 1: Simple text message
  await test('Simple text message returns token count', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'Hello, how are you?' }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 0, 'input_tokens');
  });
  // Test 2: Multi-turn conversation
  await test('Multi-turn conversation counts all messages', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'What is the capital of France?' },
        { role: 'assistant', content: 'The capital of France is Paris.' },
        { role: 'user', content: 'And what about Germany?' }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    // Multi-turn should have more tokens than single message
    assertGreater(response.input_tokens, 10, 'input_tokens for multi-turn');
  });
  // Test 3: System prompt
  await test('System prompt tokens are counted', async () => {
    const responseWithSystem = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      system: 'You are a helpful assistant that speaks like a pirate.',
      messages: [
        { role: 'user', content: 'Hello' }
      ]
    });
    const responseWithoutSystem = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'Hello' }
      ]
    });
    assert(responseWithSystem.statusCode === 200, `Expected 200, got ${responseWithSystem.statusCode}`);
    // With system prompt should have more tokens
    assertGreater(responseWithSystem.input_tokens, responseWithoutSystem.input_tokens,
      'tokens with system prompt');
  });
  // Test 4: System prompt as array
  await test('System prompt as array is counted', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      system: [
        { type: 'text', text: 'You are a helpful assistant.' },
        { type: 'text', text: 'Be concise and clear.' }
      ],
      messages: [
        { role: 'user', content: 'Hello' }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 5, 'input_tokens');
  });
  // Test 5: With tools
  await test('Tool definitions are counted', async () => {
    const responseWithTools = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'Get the weather in Tokyo' }
      ],
      tools: [
        {
          name: 'get_weather',
          description: 'Get the current weather for a location',
          input_schema: {
            type: 'object',
            properties: {
              location: { type: 'string', description: 'City name' }
            },
            required: ['location']
          }
        }
      ]
    });
    const responseWithoutTools = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'Get the weather in Tokyo' }
      ]
    });
    assert(responseWithTools.statusCode === 200, `Expected 200, got ${responseWithTools.statusCode}`);
    // With tools should have more tokens
    assertGreater(responseWithTools.input_tokens, responseWithoutTools.input_tokens,
      'tokens with tools');
  });
  // Test 6: Content as array with text blocks
  await test('Content array with text blocks', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: 'First part of the message.' },
            { type: 'text', text: 'Second part of the message.' }
          ]
        }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 5, 'input_tokens');
  });
  // Test 7: Tool use and tool result blocks
  await test('Tool use and tool result blocks are counted', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'What is the weather in Paris?' },
        {
          role: 'assistant',
          content: [
            {
              type: 'tool_use',
              id: 'tool_123',
              name: 'get_weather',
              input: { location: 'Paris' }
            }
          ]
        },
        {
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: 'tool_123',
              content: 'The weather in Paris is sunny with 22°C'
            }
          ]
        }
      ],
      tools: [
        {
          name: 'get_weather',
          description: 'Get weather for a location',
          input_schema: {
            type: 'object',
            properties: {
              location: { type: 'string' }
            }
          }
        }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 20, 'input_tokens for tool conversation');
  });
  // Test 8: Thinking blocks
  await test('Thinking blocks are counted', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'Solve this problem step by step' },
        {
          role: 'assistant',
          content: [
            {
              type: 'thinking',
              thinking: 'Let me think about this problem carefully. First, I need to understand what is being asked...'
            },
            { type: 'text', text: 'Here is my solution.' }
          ]
        },
        { role: 'user', content: 'Can you explain further?' }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 20, 'input_tokens with thinking');
  });
  // Test 9: Long text
  await test('Long text message', async () => {
    const longText = 'This is a test message. '.repeat(100);
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: longText }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    // Long text should have many tokens
    assertGreater(response.input_tokens, 100, 'input_tokens for long text');
  });
  // Test 10: Missing messages field (error case)
  await test('Missing messages returns error', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5'
    });
    assert(response.statusCode === 400, `Expected 400, got ${response.statusCode}`);
    assert(response.type === 'error', 'Should return error type');
    assert(response.error.type === 'invalid_request_error',
      `Expected invalid_request_error, got ${response.error?.type}`);
  });
  // Test 11: Missing model field (error case)
  await test('Missing model returns error', async () => {
    const response = await countTokensRequest({
      messages: [
        { role: 'user', content: 'Hello' }
      ]
    });
    assert(response.statusCode === 400, `Expected 400, got ${response.statusCode}`);
    assert(response.type === 'error', 'Should return error type');
    assert(response.error.type === 'invalid_request_error',
      `Expected invalid_request_error, got ${response.error?.type}`);
  });
  // Test 12: Invalid messages type (error case)
  await test('Invalid messages type returns error', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: 'not an array'
    });
    assert(response.statusCode === 400, `Expected 400, got ${response.statusCode}`);
    assert(response.type === 'error', 'Should return error type');
  });
  // Test 13: Empty messages array
  await test('Empty messages array returns token count', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: []
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
  });
  // Test 14: Multiple tools with complex schemas
  await test('Multiple tools with complex schemas', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'Help me with file operations' }
      ],
      tools: [
        {
          name: 'read_file',
          description: 'Read a file from the filesystem',
          input_schema: {
            type: 'object',
            properties: {
              path: { type: 'string', description: 'Path to the file' },
              encoding: { type: 'string', description: 'File encoding' }
            },
            required: ['path']
          }
        },
        {
          name: 'write_file',
          description: 'Write content to a file',
          input_schema: {
            type: 'object',
            properties: {
              path: { type: 'string', description: 'Path to the file' },
              content: { type: 'string', description: 'Content to write' },
              append: { type: 'boolean', description: 'Append mode' }
            },
            required: ['path', 'content']
          }
        },
        {
          name: 'list_directory',
          description: 'List files in a directory',
          input_schema: {
            type: 'object',
            properties: {
              path: { type: 'string', description: 'Directory path' },
              recursive: { type: 'boolean', description: 'List recursively' }
            },
            required: ['path']
          }
        }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    // Multiple tools should have significant token count
    assertGreater(response.input_tokens, 50, 'input_tokens for multiple tools');
  });
  // Test 15: Tool result as array content
  await test('Tool result with array content', async () => {
    const response = await countTokensRequest({
      model: 'claude-sonnet-4-5',
      messages: [
        { role: 'user', content: 'Search for files' },
        {
          role: 'assistant',
          content: [
            { type: 'tool_use', id: 'tool_456', name: 'search', input: { query: 'test' } }
          ]
        },
        {
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: 'tool_456',
              content: [
                { type: 'text', text: 'Found file1.txt' },
                { type: 'text', text: 'Found file2.txt' }
              ]
            }
          ]
        }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 10, 'input_tokens');
  });
  // Test 16: Gemini model token counting
  await test('Gemini model returns token count', async () => {
    const response = await countTokensRequest({
      model: 'gemini-3-flash',
      messages: [
        { role: 'user', content: 'Hello, how are you?' }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 0, 'input_tokens');
  });
  // Test 17: Gemini model with system prompt and tools
  await test('Gemini model with system prompt and tools', async () => {
    const response = await countTokensRequest({
      model: 'gemini-3-flash',
      system: 'You are a helpful assistant.',
      messages: [
        { role: 'user', content: 'What is the weather in Tokyo?' }
      ],
      tools: [
        {
          name: 'get_weather',
          description: 'Get weather for a location',
          input_schema: {
            type: 'object',
            properties: {
              location: { type: 'string' }
            }
          }
        }
      ]
    });
    assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
    assertType(response.input_tokens, 'number', 'input_tokens');
    assertGreater(response.input_tokens, 10, 'input_tokens for Gemini with tools');
  });
  // Summary
  console.log('\n' + '═'.repeat(60));
  console.log(`Tests completed: ${passed} passed, ${failed} failed`);
  if (failed > 0) {
    process.exit(1);
  }
}
runTests().catch(err => {
  console.error('Test suite failed:', err);
  process.exit(1);
});