feat: validate model IDs before processing requests
Add a model-validation cache with a 5-minute TTL so invalid model IDs are rejected upfront instead of being sent to the API. This provides better error messages and avoids unnecessary API calls.

- Add MODEL_VALIDATION_CACHE_TTL_MS constant (5 min)
- Add isValidModel() with lazy cache population
- Warm the cache when listModels() is called
- Validate the model ID in /v1/messages before processing

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -12,12 +12,12 @@
|
||||
// Re-export public API
|
||||
export { sendMessage } from './message-handler.js';
|
||||
export { sendMessageStream } from './streaming-handler.js';
|
||||
export { listModels, fetchAvailableModels, getModelQuotas, getSubscriptionTier } from './model-api.js';
|
||||
export { listModels, fetchAvailableModels, getModelQuotas, getSubscriptionTier, isValidModel } from './model-api.js';
|
||||
|
||||
// Default export for backwards compatibility
|
||||
import { sendMessage } from './message-handler.js';
|
||||
import { sendMessageStream } from './streaming-handler.js';
|
||||
import { listModels, fetchAvailableModels, getModelQuotas, getSubscriptionTier } from './model-api.js';
|
||||
import { listModels, fetchAvailableModels, getModelQuotas, getSubscriptionTier, isValidModel } from './model-api.js';
|
||||
|
||||
export default {
|
||||
sendMessage,
|
||||
@@ -25,5 +25,6 @@ export default {
|
||||
listModels,
|
||||
fetchAvailableModels,
|
||||
getModelQuotas,
|
||||
getSubscriptionTier
|
||||
getSubscriptionTier,
|
||||
isValidModel
|
||||
};
|
||||
|
||||
@@ -9,10 +9,18 @@ import {
|
||||
ANTIGRAVITY_HEADERS,
|
||||
LOAD_CODE_ASSIST_ENDPOINTS,
|
||||
LOAD_CODE_ASSIST_HEADERS,
|
||||
getModelFamily
|
||||
getModelFamily,
|
||||
MODEL_VALIDATION_CACHE_TTL_MS
|
||||
} from '../constants.js';
|
||||
import { logger } from '../utils/logger.js';
|
||||
|
||||
// Model validation cache — module-level state shared by populateModelCache()
// and isValidModel(). Populated lazily and refreshed once it is older than
// MODEL_VALIDATION_CACHE_TTL_MS.
const modelCache = {
  validModels: new Set(), // model IDs confirmed available upstream
  lastFetched: 0, // epoch ms of last successful population (0 = never fetched)
  fetchPromise: null // Prevents concurrent fetches
};
|
||||
|
||||
/**
|
||||
* Check if a model is supported (Claude or Gemini)
|
||||
* @param {string} modelId - Model ID to check
|
||||
@@ -46,6 +54,10 @@ export async function listModels(token) {
|
||||
description: modelData.displayName || modelId
|
||||
}));
|
||||
|
||||
// Warm the model validation cache
|
||||
modelCache.validModels = new Set(modelList.map(m => m.id));
|
||||
modelCache.lastFetched = Date.now();
|
||||
|
||||
return {
|
||||
object: 'list',
|
||||
data: modelList
|
||||
@@ -246,3 +258,71 @@ export async function getSubscriptionTier(token) {
|
||||
logger.warn('[CloudCode] Failed to detect subscription tier from all endpoints. Defaulting to free.');
|
||||
return { tier: 'free', projectId: null };
|
||||
}
|
||||
|
||||
/**
|
||||
* Populate the model validation cache
|
||||
* @param {string} token - OAuth access token
|
||||
* @param {string} [projectId] - Optional project ID
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
/**
 * Refresh the shared model-validation cache when it is empty or stale.
 *
 * A single in-flight promise is stored on the cache so that concurrent
 * callers piggyback on one upstream fetch instead of issuing several.
 *
 * @param {string} token - OAuth access token
 * @param {string} [projectId] - Optional project ID
 * @returns {Promise<void>} Resolves once the population attempt finishes;
 *   never rejects — fetch failures are logged and the cache is left as-is.
 */
async function populateModelCache(token, projectId = null) {
  // Fresh cache within its TTL: nothing to do.
  const cacheAge = Date.now() - modelCache.lastFetched;
  if (modelCache.validModels.size > 0 && cacheAge < MODEL_VALIDATION_CACHE_TTL_MS) {
    return;
  }

  // Another caller already started a fetch — wait for that one.
  if (modelCache.fetchPromise) {
    await modelCache.fetchPromise;
    return;
  }

  // Start a fetch and publish it so concurrent callers can await it.
  const refresh = async () => {
    try {
      const data = await fetchAvailableModels(token, projectId);
      if (data && data.models) {
        const validIds = Object.keys(data.models).filter(modelId => isSupportedModel(modelId));
        modelCache.validModels = new Set(validIds);
        modelCache.lastFetched = Date.now();
        logger.debug(`[CloudCode] Model cache populated with ${validIds.length} models`);
      }
    } catch (error) {
      logger.warn(`[CloudCode] Failed to populate model cache: ${error.message}`);
      // Don't throw - validation should degrade gracefully
    } finally {
      modelCache.fetchPromise = null;
    }
  };

  modelCache.fetchPromise = refresh();
  await modelCache.fetchPromise;
}
|
||||
|
||||
/**
|
||||
* Check if a model ID is valid (exists in the available models list)
|
||||
* Uses a cached model list with TTL-based refresh
|
||||
* @param {string} modelId - Model ID to validate
|
||||
* @param {string} token - OAuth access token for cache population
|
||||
* @param {string} [projectId] - Optional project ID
|
||||
* @returns {Promise<boolean>} True if model is valid
|
||||
*/
|
||||
/**
 * Check whether a model ID appears in the upstream list of available models.
 *
 * Backed by the TTL-refreshed model cache. If the cache cannot be populated
 * (fetch failed or an unexpected error occurred), the check fails open and
 * returns true so the upstream API remains the final validator.
 *
 * @param {string} modelId - Model ID to validate
 * @param {string} token - OAuth access token for cache population
 * @param {string} [projectId] - Optional project ID
 * @returns {Promise<boolean>} True if model is valid
 */
export async function isValidModel(modelId, token, projectId = null) {
  try {
    // Ensure the cache is populated (no-op if still fresh).
    await populateModelCache(token, projectId);

    const knownModels = modelCache.validModels;
    if (knownModels.size === 0) {
      // Cache empty (fetch failed) - fail open, let API validate
      return true;
    }
    return knownModels.has(modelId);
  } catch (error) {
    logger.debug(`[CloudCode] Model validation error: ${error.message}`);
    // Fail open - let the API validate
    return true;
  }
}
|
||||
|
||||
Reference in New Issue
Block a user