refactor: clean up token counting

This commit is contained in:
Fahad
2025-10-02 11:35:29 +04:00
parent 14a35afa1d
commit 7fe9fc49f8
4 changed files with 43 additions and 61 deletions

View File

@@ -361,15 +361,6 @@ class GeminiModelProvider(ModelProvider):
error_msg = f"Gemini API error for model {resolved_name} after {actual_attempts} attempt{'s' if actual_attempts > 1 else ''}: {str(last_exception)}"
raise RuntimeError(error_msg) from last_exception
def count_tokens(self, text: str, model_name: str) -> int:
    """Estimate how many tokens *text* occupies for the given Gemini model.

    The model name is resolved purely as a validation/normalization step;
    the resolved value is not otherwise used here. The count itself is a
    rough heuristic of ~4 characters per token (typical for English text).
    TODO: Use actual Gemini tokenizer when available in SDK.
    """
    # Validate/normalize the requested model name; result intentionally unused.
    self._resolve_model_name(model_name)

    # Heuristic estimate: integer division truncates, so very short
    # non-empty strings may report 0 tokens.
    approx_chars_per_token = 4
    return len(text) // approx_chars_per_token
def get_provider_type(self) -> ProviderType:
    """Identify this backend as the Google (Gemini) provider."""
    provider_kind = ProviderType.GOOGLE
    return provider_kind