diff --git a/packages/core/src/code_assist/converter.ts b/packages/core/src/code_assist/converter.ts index a71c70da..9c8a2434 100644 --- a/packages/core/src/code_assist/converter.ts +++ b/packages/core/src/code_assist/converter.ts @@ -22,7 +22,6 @@ import { Part, SafetySetting, PartUnion, - SchemaUnion, SpeechConfigUnion, ThinkingConfig, ToolListUnion, @@ -61,7 +60,7 @@ interface VertexGenerationConfig { frequencyPenalty?: number; seed?: number; responseMimeType?: string; - responseSchema?: SchemaUnion; + responseJsonSchema?: unknown; routingConfig?: GenerationConfigRoutingConfig; modelSelectionConfig?: ModelSelectionConfig; responseModalities?: string[]; @@ -230,7 +229,7 @@ function toVertexGenerationConfig( frequencyPenalty: config.frequencyPenalty, seed: config.seed, responseMimeType: config.responseMimeType, - responseSchema: config.responseSchema, + responseJsonSchema: config.responseJsonSchema, routingConfig: config.routingConfig, modelSelectionConfig: config.modelSelectionConfig, responseModalities: config.responseModalities, diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts index 2e8d086f..4c6f6dbb 100644 --- a/packages/core/src/core/client.test.ts +++ b/packages/core/src/core/client.test.ts @@ -396,7 +396,7 @@ describe('Gemini Client (client.ts)', () => { systemInstruction: getCoreSystemPrompt(''), temperature: 0, topP: 1, - responseSchema: schema, + responseJsonSchema: schema, responseMimeType: 'application/json', }, contents, @@ -435,7 +435,7 @@ describe('Gemini Client (client.ts)', () => { temperature: 0.9, topP: 1, // from default topK: 20, - responseSchema: schema, + responseJsonSchema: schema, responseMimeType: 'application/json', }, contents, diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts index df3dbc4e..cc492472 100644 --- a/packages/core/src/core/client.ts +++ b/packages/core/src/core/client.ts @@ -7,7 +7,6 @@ import { EmbedContentParameters, GenerateContentConfig, - 
SchemaUnion, PartListUnion, Content, Tool, @@ -515,7 +514,7 @@ export class GeminiClient { async generateJson( contents: Content[], - schema: SchemaUnion, + schema: Record<string, unknown>, abortSignal: AbortSignal, model?: string, config: GenerateContentConfig = {}, @@ -539,7 +538,7 @@ export class GeminiClient { config: { ...requestConfig, systemInstruction, - responseSchema: schema, + responseJsonSchema: schema, responseMimeType: 'application/json', }, contents, diff --git a/packages/core/src/services/loopDetectionService.ts b/packages/core/src/services/loopDetectionService.ts index a01e4ee9..409e92a8 100644 --- a/packages/core/src/services/loopDetectionService.ts +++ b/packages/core/src/services/loopDetectionService.ts @@ -9,7 +9,6 @@ import { GeminiEventType, ServerGeminiStreamEvent } from '../core/turn.js'; import { logLoopDetected } from '../telemetry/loggers.js'; import { LoopDetectedEvent, LoopType } from '../telemetry/types.js'; import { Config, DEFAULT_GEMINI_FLASH_MODEL } from '../config/config.js'; -import { SchemaUnion, Type } from '@google/genai'; const TOOL_CALL_LOOP_THRESHOLD = 5; const CONTENT_LOOP_THRESHOLD = 10; @@ -341,16 +340,16 @@ Please analyze the conversation history to determine the possibility that the co ...recentHistory, { role: 'user', parts: [{ text: prompt }] }, ]; - const schema: SchemaUnion = { - type: Type.OBJECT, + const schema: Record<string, unknown> = { + type: 'object', properties: { reasoning: { - type: Type.STRING, + type: 'string', description: 'Your reasoning on if the conversation is looping without forward progress.', }, confidence: { - type: Type.NUMBER, + type: 'number', description: 'A number between 0.0 and 1.0 representing your confidence that the conversation is in an unproductive state.', }, diff --git a/packages/core/src/utils/editCorrector.ts b/packages/core/src/utils/editCorrector.ts index 0ef8d4fe..faa52b51 100644 --- a/packages/core/src/utils/editCorrector.ts +++ b/packages/core/src/utils/editCorrector.ts @@ -4,12 +4,7 @@ * 
SPDX-License-Identifier: Apache-2.0 */ -import { - Content, - GenerateContentConfig, - SchemaUnion, - Type, -} from '@google/genai'; +import { Content, GenerateContentConfig } from '@google/genai'; import { GeminiClient } from '../core/client.js'; import { EditToolParams, EditTool } from '../tools/edit.js'; import { WriteFileTool } from '../tools/write-file.js'; @@ -364,11 +359,11 @@ export async function ensureCorrectFileContent( } // Define the expected JSON schema for the LLM response for old_string correction -const OLD_STRING_CORRECTION_SCHEMA: SchemaUnion = { - type: Type.OBJECT, +const OLD_STRING_CORRECTION_SCHEMA: Record<string, unknown> = { + type: 'object', properties: { corrected_target_snippet: { - type: Type.STRING, + type: 'string', description: 'The corrected version of the target snippet that exactly and uniquely matches a segment within the provided file content.', }, @@ -438,11 +433,11 @@ Return ONLY the corrected target snippet in the specified JSON format with the k } // Define the expected JSON schema for the new_string correction LLM response -const NEW_STRING_CORRECTION_SCHEMA: SchemaUnion = { - type: Type.OBJECT, +const NEW_STRING_CORRECTION_SCHEMA: Record<string, unknown> = { + type: 'object', properties: { corrected_new_string: { - type: Type.STRING, + type: 'string', description: 'The original_new_string adjusted to be a suitable replacement for the corrected_old_string, while maintaining the original intent of the change.', }, @@ -521,11 +516,11 @@ Return ONLY the corrected string in the specified JSON format with the key 'corr } } -const CORRECT_NEW_STRING_ESCAPING_SCHEMA: SchemaUnion = { - type: Type.OBJECT, +const CORRECT_NEW_STRING_ESCAPING_SCHEMA: Record<string, unknown> = { + type: 'object', properties: { corrected_new_string_escaping: { - type: Type.STRING, + type: 'string', description: 'The new_string with corrected escaping, ensuring it is a proper replacement for the old_string, especially considering potential over-escaping issues from previous LLM generations.', }, @@ 
-593,11 +588,11 @@ Return ONLY the corrected string in the specified JSON format with the key 'corr } } -const CORRECT_STRING_ESCAPING_SCHEMA: SchemaUnion = { - type: Type.OBJECT, +const CORRECT_STRING_ESCAPING_SCHEMA: Record<string, unknown> = { + type: 'object', properties: { corrected_string_escaping: { - type: Type.STRING, + type: 'string', description: 'The string with corrected escaping, ensuring it is valid, specially considering potential over-escaping issues from previous LLM generations.', }, diff --git a/packages/core/src/utils/nextSpeakerChecker.ts b/packages/core/src/utils/nextSpeakerChecker.ts index 8497db61..4ae8f437 100644 --- a/packages/core/src/utils/nextSpeakerChecker.ts +++ b/packages/core/src/utils/nextSpeakerChecker.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { Content, SchemaUnion, Type } from '@google/genai'; +import { Content } from '@google/genai'; import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js'; import { GeminiClient } from '../core/client.js'; import { GeminiChat } from '../core/geminiChat.js'; @@ -16,16 +16,16 @@ const CHECK_PROMPT = `Analyze *only* the content and structure of your immediate 2. **Question to User:** If your last response ends with a direct question specifically addressed *to the user*, then the **'user'** should speak next. 3. **Waiting for User:** If your last response completed a thought, statement, or task *and* does not meet the criteria for Rule 1 (Model Continues) or Rule 2 (Question to User), it implies a pause expecting user input or reaction. 
In this case, the **'user'** should speak next.`; -const RESPONSE_SCHEMA: SchemaUnion = { - type: Type.OBJECT, +const RESPONSE_SCHEMA: Record<string, unknown> = { + type: 'object', properties: { reasoning: { - type: Type.STRING, + type: 'string', description: "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn.", }, next_speaker: { - type: Type.STRING, + type: 'string', enum: ['user', 'model'], description: 'Who should speak next based *only* on the preceding turn and the decision rules',