docs: fix misleading tokenizer comments

This commit is contained in:
behemoth-phucnm
2026-01-14 19:31:43 +07:00
parent 7da7e887bf
commit d33de409d4
3 changed files with 52 additions and 5 deletions

View File

@@ -3,8 +3,8 @@
*
* Implements Anthropic's /v1/messages/count_tokens endpoint
* Uses official tokenizers for each model family:
* - Claude: @anthropic-ai/tokenizer (99.99% accuracy)
* - Gemini: @lenml/tokenizer-gemini (99.99% accuracy)
* - Claude: @anthropic-ai/tokenizer
* - Gemini: @lenml/tokenizer-gemini
*
* @see https://platform.claude.com/docs/en/api/messages-count-tokens
*/
@@ -112,6 +112,12 @@ function estimateTextTokens(text, model, geminiTok = null) {
/**
* Extract text from message content
*
* Note: This function only extracts text from 'text' type blocks.
* Image blocks (type: 'image') and document blocks (type: 'document') are not tokenized
* and will not contribute to the token count. This is intentional as binary content
* requires different handling and Anthropic's actual token counting for images uses
* an estimate derived from image dimensions (roughly width × height / 750, up to ~1600 tokens per image).
*
* @param {string|Array} content - Message content
* @returns {string} Concatenated text
*/
@@ -194,7 +200,7 @@ function countTokensLocally(request, geminiTok = null) {
/**
* Count tokens in a message request
* Implements Anthropic's /v1/messages/count_tokens endpoint
* Uses local tokenization for all content types (99.99% accuracy)
* Uses local tokenization for all content types
*
* @param {Object} anthropicRequest - Anthropic format request with messages, model, system, tools
* @param {Object} accountManager - Account manager instance (unused, kept for API compatibility)

View File

@@ -599,7 +599,7 @@ app.get('/v1/models', async (req, res) => {
/**
* Count tokens endpoint - Anthropic Messages API compatible
* Uses hybrid approach: local tokenizer for text, API for complex content (images, documents)
* Uses local tokenization with official tokenizers (@anthropic-ai/tokenizer for Claude, @lenml/tokenizer-gemini for Gemini)
*/
app.post('/v1/messages/count_tokens', async (req, res) => {
try {

View File

@@ -2,7 +2,7 @@
* Test Count Tokens - Tests for the /v1/messages/count_tokens endpoint
*
* Verifies token counting functionality:
* - Local estimation using gpt-tokenizer
* - Local estimation using official tokenizers (@anthropic-ai/tokenizer for Claude, @lenml/tokenizer-gemini for Gemini)
* - Request validation
* - Different content types (text, tools, system prompts)
*/
@@ -436,6 +436,47 @@ async function runTests() {
assertGreater(response.input_tokens, 10, 'input_tokens');
});
// Test 16: Gemini model token counting
await test('Gemini model returns token count', async () => {
const response = await countTokensRequest({
model: 'gemini-3-flash',
messages: [
{ role: 'user', content: 'Hello, how are you?' }
]
});
assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
assertType(response.input_tokens, 'number', 'input_tokens');
assertGreater(response.input_tokens, 0, 'input_tokens');
});
// Test 17: Gemini model with system prompt and tools
await test('Gemini model with system prompt and tools', async () => {
const response = await countTokensRequest({
model: 'gemini-3-flash',
system: 'You are a helpful assistant.',
messages: [
{ role: 'user', content: 'What is the weather in Tokyo?' }
],
tools: [
{
name: 'get_weather',
description: 'Get weather for a location',
input_schema: {
type: 'object',
properties: {
location: { type: 'string' }
}
}
}
]
});
assert(response.statusCode === 200, `Expected 200, got ${response.statusCode}`);
assertType(response.input_tokens, 'number', 'input_tokens');
assertGreater(response.input_tokens, 10, 'input_tokens for Gemini with tools');
});
// Summary
console.log('\n' + '═'.repeat(60));
console.log(`Tests completed: ${passed} passed, ${failed} failed`);