Run `npm run format`
- Also updated README.md accordingly. Part of https://b.corp.google.com/issues/411384603
This commit is contained in:
parent 7928c1727f
commit cfc697a96d
@@ -1,7 +1,7 @@
{
  "semi": true,
  "trailingComma": "all",
  "singleQuote": true,
  "printWidth": 80,
  "tabWidth": 2
}

README.md (10 additions)

@@ -51,3 +51,13 @@ To debug the CLI application using VS Code:
2. In VS Code, use the "Attach" launch configuration (found in `.vscode/launch.json`). This configuration is set up to attach to the Node.js process listening on port 9229, which is the default port used by `--inspect-brk`.

Alternatively, you can use the "Launch Program" configuration in VS Code if you prefer to launch the currently open file directly, but the "Attach" method is generally recommended for debugging the main CLI entry point.

## Formatting

To format the code in this project, run the following command from the root directory:

```bash
npm run format
```

This command uses Prettier to format the code according to the project's style guidelines.

@@ -95,7 +95,11 @@ export default tseslint.config(
    '@typescript-eslint/no-namespace': ['error', { allowDeclarations: true }],
    '@typescript-eslint/no-unused-vars': [
      'warn',
      {
        argsIgnorePattern: '^_',
        varsIgnorePattern: '^_',
        caughtErrorsIgnorePattern: '^_',
      },
    ],
    'no-cond-assign': 'error',
    'no-debugger': 'error',

@@ -108,12 +112,14 @@ export default tseslint.config(
      },
      {
        selector: 'ThrowStatement > Literal:not([value=/^\\w+Error:/])',
        message:
          'Do not throw string literals or non-Error objects. Throw new Error("...") instead.',
      },
    ],
    'no-unsafe-finally': 'error',
    'no-unused-expressions': 'off', // Disable base rule
    '@typescript-eslint/no-unused-expressions': [
      // Enable TS version
      'error',
      { allowShortCircuit: true, allowTernary: true },
    ],

@@ -1,40 +1,40 @@
{
  "name": "gemini-code-cli",
  "version": "1.0.0",
  "description": "Gemini Code CLI",
  "type": "module",
  "main": "dist/gemini.js",
  "scripts": {
    "build": "tsc",
    "start": "node dist/gemini.js",
    "debug": "node --inspect-brk dist/gemini.js",
    "lint": "eslint . --ext .ts,.tsx",
    "format": "prettier --write ."
  },
  "files": [
    "dist"
  ],
  "dependencies": {
    "@google/genai": "^0.8.0",
    "diff": "^7.0.0",
    "dotenv": "^16.4.7",
    "fast-glob": "^3.3.3",
    "ink": "^5.2.0",
    "ink-select-input": "^6.0.0",
    "ink-spinner": "^5.0.0",
    "ink-text-input": "^6.0.0",
    "react": "^18.3.1",
    "yargs": "^17.7.2"
  },
  "devDependencies": {
    "@types/diff": "^7.0.2",
    "@types/dotenv": "^6.1.1",
    "@types/node": "^20.11.24",
    "@types/react": "^19.1.0",
    "@types/yargs": "^17.0.32",
    "typescript": "^5.3.3"
  },
  "engines": {
    "node": ">=18"
  }
}

@@ -24,11 +24,11 @@ export async function parseArguments(): Promise<CliArgs> {
  // Handle warnings for extra arguments here
  if (argv._ && argv._.length > 0) {
    console.warn(
      `Warning: Additional arguments provided (${argv._.join(', ')}), but will be ignored.`,
    );
  }

  // Cast to the interface to ensure the structure aligns with expectations
  // Use `unknown` first for safer casting if types might not perfectly match
  return argv as unknown as CliArgs;
}

@@ -4,43 +4,47 @@ import * as path from 'node:path';
import process from 'node:process';

function findEnvFile(startDir: string): string | null {
  // Start search from the provided directory (e.g., current working directory)
  let currentDir = path.resolve(startDir); // Ensure absolute path
  while (true) {
    const envPath = path.join(currentDir, '.env');
    if (fs.existsSync(envPath)) {
      return envPath;
    }

    const parentDir = path.dirname(currentDir);
    if (parentDir === currentDir || !parentDir) {
      return null;
    }
    currentDir = parentDir;
  }
}

export function loadEnvironment(): void {
  // Start searching from the current working directory by default
  const envFilePath = findEnvFile(process.cwd());

  if (!envFilePath) {
    return;
  }

  dotenv.config({ path: envFilePath });

  if (!process.env.GEMINI_API_KEY) {
    console.error(
      'Error: GEMINI_API_KEY environment variable is not set in the loaded .env file.',
    );
    process.exit(1);
  }
}

export function getApiKey(): string {
  loadEnvironment();
  const apiKey = process.env.GEMINI_API_KEY;
  if (!apiKey) {
    throw new Error(
      'GEMINI_API_KEY is missing. Ensure loadEnvironment() was called successfully.',
    );
  }
  return apiKey;
}

@@ -1,13 +1,20 @@
import {
  GenerateContentConfig,
  GoogleGenAI,
  Part,
  Chat,
  Type,
  SchemaUnion,
  PartListUnion,
  Content,
} from '@google/genai';
import { getApiKey } from '../config/env.js';
import { CoreSystemPrompt } from './prompts.js';
import {
  type ToolCallEvent,
  type ToolCallConfirmationDetails,
  ToolCallStatus,
} from '../ui/types.js';
import process from 'node:process';
import { toolRegistry } from '../tools/tool-registry.js';
import { ToolResult } from '../tools/tools.js';

@@ -15,41 +22,45 @@ import { getFolderStructure } from '../utils/getFolderStructure.js';
|
|||
import { GeminiEventType, GeminiStream } from './gemini-stream.js';
|
||||
|
||||
type ToolExecutionOutcome = {
|
||||
callId: string;
|
||||
name: string;
|
||||
args: Record<string, any>;
|
||||
result?: ToolResult;
|
||||
error?: any;
|
||||
confirmationDetails?: ToolCallConfirmationDetails;
|
||||
callId: string;
|
||||
name: string;
|
||||
args: Record<string, any>;
|
||||
result?: ToolResult;
|
||||
error?: any;
|
||||
confirmationDetails?: ToolCallConfirmationDetails;
|
||||
};
|
||||
|
||||
export class GeminiClient {
|
||||
private ai: GoogleGenAI;
|
||||
private defaultHyperParameters: GenerateContentConfig = {
|
||||
temperature: 0,
|
||||
topP: 1,
|
||||
};
|
||||
private readonly MAX_TURNS = 100;
|
||||
private ai: GoogleGenAI;
|
||||
private defaultHyperParameters: GenerateContentConfig = {
|
||||
temperature: 0,
|
||||
topP: 1,
|
||||
};
|
||||
private readonly MAX_TURNS = 100;
|
||||
|
||||
constructor() {
|
||||
const apiKey = getApiKey();
|
||||
this.ai = new GoogleGenAI({ apiKey });
|
||||
}
|
||||
constructor() {
|
||||
const apiKey = getApiKey();
|
||||
this.ai = new GoogleGenAI({ apiKey });
|
||||
}
|
||||
|
||||
public async startChat(): Promise<Chat> {
|
||||
const tools = toolRegistry.getToolSchemas();
|
||||
public async startChat(): Promise<Chat> {
|
||||
const tools = toolRegistry.getToolSchemas();
|
||||
|
||||
// --- Get environmental information ---
|
||||
const cwd = process.cwd();
|
||||
const today = new Date().toLocaleDateString(undefined, { // Use locale-aware date formatting
|
||||
weekday: 'long', year: 'numeric', month: 'long', day: 'numeric'
|
||||
});
|
||||
const platform = process.platform;
|
||||
// --- Get environmental information ---
|
||||
const cwd = process.cwd();
|
||||
const today = new Date().toLocaleDateString(undefined, {
|
||||
// Use locale-aware date formatting
|
||||
weekday: 'long',
|
||||
year: 'numeric',
|
||||
month: 'long',
|
||||
day: 'numeric',
|
||||
});
|
||||
const platform = process.platform;
|
||||
|
||||
// --- Format information into a conversational multi-line string ---
|
||||
const folderStructure = await getFolderStructure(cwd);
|
||||
// --- End folder structure formatting ---)
|
||||
const initialContextText = `
|
||||
// --- Format information into a conversational multi-line string ---
|
||||
const folderStructure = await getFolderStructure(cwd);
|
||||
// --- End folder structure formatting ---)
|
||||
const initialContextText = `
|
||||
Okay, just setting up the context for our chat.
|
||||
Today is ${today}.
|
||||
My operating system is: ${platform}
|
||||
|
@@ -57,194 +68,258 @@ I'm currently working in the directory: ${cwd}
|
|||
${folderStructure}
|
||||
`.trim();
|
||||
|
||||
const initialContextPart: Part = { text: initialContextText };
|
||||
// --- End environmental information formatting ---
|
||||
const initialContextPart: Part = { text: initialContextText };
|
||||
// --- End environmental information formatting ---
|
||||
|
||||
try {
|
||||
const chat = this.ai.chats.create({
|
||||
model: 'gemini-2.0-flash',//'gemini-2.0-flash',
|
||||
config: {
|
||||
systemInstruction: CoreSystemPrompt,
|
||||
...this.defaultHyperParameters,
|
||||
tools,
|
||||
},
|
||||
history: [
|
||||
// --- Add the context as a single part in the initial user message ---
|
||||
{
|
||||
role: "user",
|
||||
parts: [initialContextPart] // Pass the single Part object in an array
|
||||
},
|
||||
// --- Add an empty model response to balance the history ---
|
||||
{
|
||||
role: "model",
|
||||
parts: [{ text: "Got it. Thanks for the context!" }] // A slightly more conversational model response
|
||||
}
|
||||
// --- End history modification ---
|
||||
],
|
||||
});
|
||||
return chat;
|
||||
} catch (error) {
|
||||
console.error("Error initializing Gemini chat session:", error);
|
||||
const message = error instanceof Error ? error.message : "Unknown error.";
|
||||
throw new Error(`Failed to initialize chat: ${message}`);
|
||||
try {
|
||||
const chat = this.ai.chats.create({
|
||||
model: 'gemini-2.0-flash', //'gemini-2.0-flash',
|
||||
config: {
|
||||
systemInstruction: CoreSystemPrompt,
|
||||
...this.defaultHyperParameters,
|
||||
tools,
|
||||
},
|
||||
history: [
|
||||
// --- Add the context as a single part in the initial user message ---
|
||||
{
|
||||
role: 'user',
|
||||
parts: [initialContextPart], // Pass the single Part object in an array
|
||||
},
|
||||
// --- Add an empty model response to balance the history ---
|
||||
{
|
||||
role: 'model',
|
||||
parts: [{ text: 'Got it. Thanks for the context!' }], // A slightly more conversational model response
|
||||
},
|
||||
// --- End history modification ---
|
||||
],
|
||||
});
|
||||
return chat;
|
||||
} catch (error) {
|
||||
console.error('Error initializing Gemini chat session:', error);
|
||||
const message = error instanceof Error ? error.message : 'Unknown error.';
|
||||
throw new Error(`Failed to initialize chat: ${message}`);
|
||||
}
|
||||
}
|
||||
|
||||
public addMessageToHistory(chat: Chat, message: Content): void {
|
||||
const history = chat.getHistory();
|
||||
history.push(message);
|
||||
this.ai.chats;
|
||||
chat;
|
||||
}
|
||||
|
||||
public async *sendMessageStream(
|
||||
chat: Chat,
|
||||
request: PartListUnion,
|
||||
signal?: AbortSignal,
|
||||
): GeminiStream {
|
||||
let currentMessageToSend: PartListUnion = request;
|
||||
let turns = 0;
|
||||
|
||||
try {
|
||||
while (turns < this.MAX_TURNS) {
|
||||
turns++;
|
||||
const resultStream = await chat.sendMessageStream({
|
||||
message: currentMessageToSend,
|
||||
});
|
||||
let functionResponseParts: Part[] = [];
|
||||
let pendingToolCalls: Array<{
|
||||
callId: string;
|
||||
name: string;
|
||||
args: Record<string, any>;
|
||||
}> = [];
|
||||
let yieldedTextInTurn = false;
|
||||
const chunksForDebug = [];
|
||||
|
||||
for await (const chunk of resultStream) {
|
||||
chunksForDebug.push(chunk);
|
||||
if (signal?.aborted) {
|
||||
const abortError = new Error(
|
||||
'Request cancelled by user during stream.',
|
||||
);
|
||||
abortError.name = 'AbortError';
|
||||
throw abortError;
|
||||
}
|
||||
|
||||
const functionCalls = chunk.functionCalls;
|
||||
if (functionCalls && functionCalls.length > 0) {
|
||||
for (const call of functionCalls) {
|
||||
const callId =
|
||||
call.id ??
|
||||
`${call.name}-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
const name = call.name || 'undefined_tool_name';
|
||||
const args = (call.args || {}) as Record<string, any>;
|
||||
|
||||
pendingToolCalls.push({ callId, name, args });
|
||||
const evtValue: ToolCallEvent = {
|
||||
type: 'tool_call',
|
||||
status: ToolCallStatus.Pending,
|
||||
callId,
|
||||
name,
|
||||
args,
|
||||
resultDisplay: undefined,
|
||||
confirmationDetails: undefined,
|
||||
};
|
||||
yield {
|
||||
type: GeminiEventType.ToolCallInfo,
|
||||
value: evtValue,
|
||||
};
|
||||
}
|
||||
} else {
|
||||
const text = chunk.text;
|
||||
if (text) {
|
||||
yieldedTextInTurn = true;
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: text,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public addMessageToHistory(chat: Chat, message: Content): void {
|
||||
const history = chat.getHistory();
|
||||
history.push(message);
|
||||
this.ai.chats
|
||||
chat
|
||||
}
|
||||
if (pendingToolCalls.length > 0) {
|
||||
const toolPromises: Promise<ToolExecutionOutcome>[] =
|
||||
pendingToolCalls.map(async (pendingToolCall) => {
|
||||
const tool = toolRegistry.getTool(pendingToolCall.name);
|
||||
|
||||
public async* sendMessageStream(
|
||||
chat: Chat,
|
||||
request: PartListUnion,
|
||||
signal?: AbortSignal
|
||||
): GeminiStream {
|
||||
let currentMessageToSend: PartListUnion = request;
|
||||
let turns = 0;
|
||||
if (!tool) {
|
||||
// Directly return error outcome if tool not found
|
||||
return {
|
||||
...pendingToolCall,
|
||||
error: new Error(
|
||||
`Tool "${pendingToolCall.name}" not found or is not registered.`,
|
||||
),
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
while (turns < this.MAX_TURNS) {
|
||||
turns++;
|
||||
const resultStream = await chat.sendMessageStream({ message: currentMessageToSend });
|
||||
let functionResponseParts: Part[] = [];
|
||||
let pendingToolCalls: Array<{ callId: string; name: string; args: Record<string, any> }> = [];
|
||||
let yieldedTextInTurn = false;
|
||||
const chunksForDebug = [];
|
||||
|
||||
for await (const chunk of resultStream) {
|
||||
chunksForDebug.push(chunk);
|
||||
if (signal?.aborted) {
|
||||
const abortError = new Error("Request cancelled by user during stream.");
|
||||
abortError.name = 'AbortError';
|
||||
throw abortError;
|
||||
}
|
||||
|
||||
const functionCalls = chunk.functionCalls;
|
||||
if (functionCalls && functionCalls.length > 0) {
|
||||
for (const call of functionCalls) {
|
||||
const callId = call.id ?? `${call.name}-${Date.now()}-${Math.random().toString(16).slice(2)}`;
|
||||
const name = call.name || 'undefined_tool_name';
|
||||
const args = (call.args || {}) as Record<string, any>;
|
||||
|
||||
pendingToolCalls.push({ callId, name, args });
|
||||
const evtValue: ToolCallEvent = {
|
||||
type: 'tool_call',
|
||||
status: ToolCallStatus.Pending,
|
||||
callId,
|
||||
name,
|
||||
args,
|
||||
resultDisplay: undefined,
|
||||
confirmationDetails: undefined,
|
||||
}
|
||||
yield {
|
||||
type: GeminiEventType.ToolCallInfo,
|
||||
value: evtValue,
|
||||
};
|
||||
}
|
||||
} else {
|
||||
const text = chunk.text;
|
||||
if (text) {
|
||||
yieldedTextInTurn = true;
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: text,
|
||||
};
|
||||
}
|
||||
}
|
||||
try {
|
||||
const confirmation = await tool.shouldConfirmExecute(
|
||||
pendingToolCall.args,
|
||||
);
|
||||
if (confirmation) {
|
||||
return {
|
||||
...pendingToolCall,
|
||||
confirmationDetails: confirmation,
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
...pendingToolCall,
|
||||
error: new Error(
|
||||
`Tool failed to check tool confirmation: ${error}`,
|
||||
),
|
||||
};
|
||||
}
|
||||
|
||||
if (pendingToolCalls.length > 0) {
|
||||
const toolPromises: Promise<ToolExecutionOutcome>[] = pendingToolCalls.map(async pendingToolCall => {
|
||||
const tool = toolRegistry.getTool(pendingToolCall.name);
|
||||
try {
|
||||
const result = await tool.execute(pendingToolCall.args);
|
||||
return { ...pendingToolCall, result };
|
||||
} catch (error) {
|
||||
return {
|
||||
...pendingToolCall,
|
||||
error: new Error(`Tool failed to execute: ${error}`),
|
||||
};
|
||||
}
|
||||
});
|
||||
const toolExecutionOutcomes: ToolExecutionOutcome[] =
|
||||
await Promise.all(toolPromises);
|
||||
|
||||
if (!tool) {
|
||||
// Directly return error outcome if tool not found
|
||||
return { ...pendingToolCall, error: new Error(`Tool "${pendingToolCall.name}" not found or is not registered.`) };
|
||||
}
|
||||
for (const executedTool of toolExecutionOutcomes) {
|
||||
const { callId, name, args, result, error, confirmationDetails } =
|
||||
executedTool;
|
||||
|
||||
try {
|
||||
const confirmation = await tool.shouldConfirmExecute(pendingToolCall.args);
|
||||
if (confirmation) {
|
||||
return { ...pendingToolCall, confirmationDetails: confirmation };
|
||||
}
|
||||
} catch (error) {
|
||||
return { ...pendingToolCall, error: new Error(`Tool failed to check tool confirmation: ${error}`) };
|
||||
}
|
||||
if (error) {
|
||||
const errorMessage = error?.message || String(error);
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: `[Error invoking tool ${name}: ${errorMessage}]`,
|
||||
};
|
||||
} else if (
|
||||
result &&
|
||||
typeof result === 'object' &&
|
||||
result !== null &&
|
||||
'error' in result
|
||||
) {
|
||||
const errorMessage = String(result.error);
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: `[Error executing tool ${name}: ${errorMessage}]`,
|
||||
};
|
||||
} else {
|
||||
const status = confirmationDetails
|
||||
? ToolCallStatus.Confirming
|
||||
: ToolCallStatus.Invoked;
|
||||
const evtValue: ToolCallEvent = {
|
||||
type: 'tool_call',
|
||||
status,
|
||||
callId,
|
||||
name,
|
||||
args,
|
||||
resultDisplay: result?.returnDisplay,
|
||||
confirmationDetails,
|
||||
};
|
||||
yield {
|
||||
type: GeminiEventType.ToolCallInfo,
|
||||
value: evtValue,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await tool.execute(pendingToolCall.args);
|
||||
return { ...pendingToolCall, result };
|
||||
} catch (error) {
|
||||
return { ...pendingToolCall, error: new Error(`Tool failed to execute: ${error}`) };
|
||||
}
|
||||
});
|
||||
const toolExecutionOutcomes: ToolExecutionOutcome[] = await Promise.all(toolPromises);
|
||||
pendingToolCalls = [];
|
||||
|
||||
for (const executedTool of toolExecutionOutcomes) {
|
||||
const { callId, name, args, result, error, confirmationDetails } = executedTool;
|
||||
const waitingOnConfirmations =
|
||||
toolExecutionOutcomes.filter(
|
||||
(outcome) => outcome.confirmationDetails,
|
||||
).length > 0;
|
||||
if (waitingOnConfirmations) {
|
||||
// Stop processing content, wait for user.
|
||||
// TODO: Kill token processing once API supports signals.
|
||||
break;
|
||||
}
|
||||
|
||||
if (error) {
|
||||
const errorMessage = error?.message || String(error);
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: `[Error invoking tool ${name}: ${errorMessage}]`,
|
||||
};
|
||||
} else if (result && typeof result === 'object' && result !== null && 'error' in result) {
|
||||
const errorMessage = String(result.error);
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: `[Error executing tool ${name}: ${errorMessage}]`,
|
||||
};
|
||||
} else {
|
||||
const status = confirmationDetails ? ToolCallStatus.Confirming : ToolCallStatus.Invoked;
|
||||
const evtValue: ToolCallEvent = { type: 'tool_call', status, callId, name, args, resultDisplay: result?.returnDisplay, confirmationDetails }
|
||||
yield {
|
||||
type: GeminiEventType.ToolCallInfo,
|
||||
value: evtValue,
|
||||
};
|
||||
}
|
||||
}
|
||||
functionResponseParts = toolExecutionOutcomes.map(
|
||||
(executedTool: ToolExecutionOutcome): Part => {
|
||||
const { name, result, error } = executedTool;
|
||||
const output = { output: result?.llmContent };
|
||||
let toolOutcomePayload: any;
|
||||
|
||||
pendingToolCalls = [];
|
||||
if (error) {
|
||||
const errorMessage = error?.message || String(error);
|
||||
toolOutcomePayload = {
|
||||
error: `Invocation failed: ${errorMessage}`,
|
||||
};
|
||||
console.error(
|
||||
`[Turn ${turns}] Critical error invoking tool ${name}:`,
|
||||
error,
|
||||
);
|
||||
} else if (
|
||||
result &&
|
||||
typeof result === 'object' &&
|
||||
result !== null &&
|
||||
'error' in result
|
||||
) {
|
||||
toolOutcomePayload = output;
|
||||
console.warn(
|
||||
`[Turn ${turns}] Tool ${name} returned an error structure:`,
|
||||
result.error,
|
||||
);
|
||||
} else {
|
||||
toolOutcomePayload = output;
|
||||
}
|
||||
|
||||
const waitingOnConfirmations = toolExecutionOutcomes.filter(outcome => outcome.confirmationDetails).length > 0;
|
||||
if (waitingOnConfirmations) {
|
||||
// Stop processing content, wait for user.
|
||||
// TODO: Kill token processing once API supports signals.
|
||||
break;
|
||||
}
|
||||
|
||||
functionResponseParts = toolExecutionOutcomes.map((executedTool: ToolExecutionOutcome): Part => {
|
||||
const { name, result, error } = executedTool;
|
||||
const output = { "output": result?.llmContent };
|
||||
let toolOutcomePayload: any;
|
||||
|
||||
if (error) {
|
||||
const errorMessage = error?.message || String(error);
|
||||
toolOutcomePayload = { error: `Invocation failed: ${errorMessage}` };
|
||||
console.error(`[Turn ${turns}] Critical error invoking tool ${name}:`, error);
|
||||
} else if (result && typeof result === 'object' && result !== null && 'error' in result) {
|
||||
toolOutcomePayload = output;
|
||||
console.warn(`[Turn ${turns}] Tool ${name} returned an error structure:`, result.error);
|
||||
} else {
|
||||
toolOutcomePayload = output;
|
||||
}
|
||||
|
||||
return {
|
||||
functionResponse: {
|
||||
name: name,
|
||||
id: executedTool.callId,
|
||||
response: toolOutcomePayload,
|
||||
},
|
||||
};
|
||||
});
|
||||
currentMessageToSend = functionResponseParts;
|
||||
} else if (yieldedTextInTurn) {
|
||||
const history = chat.getHistory();
|
||||
const checkPrompt = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
|
||||
return {
|
||||
functionResponse: {
|
||||
name: name,
|
||||
id: executedTool.callId,
|
||||
response: toolOutcomePayload,
|
||||
},
|
||||
};
|
||||
},
|
||||
);
|
||||
currentMessageToSend = functionResponseParts;
|
||||
} else if (yieldedTextInTurn) {
|
||||
const history = chat.getHistory();
|
||||
const checkPrompt = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
|
||||
|
||||
**Decision Rules (apply in order):**
|
||||
|
||||
|
@@ -274,110 +349,135 @@ Respond *only* in JSON format according to the following schema. Do not include
|
|||
\`\`\`
|
||||
}`;
|
||||
|
||||
// Schema Idea
|
||||
const responseSchema: SchemaUnion = {
|
||||
type: Type.OBJECT,
|
||||
properties: {
|
||||
reasoning: {
|
||||
type: Type.STRING,
|
||||
description: "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
|
||||
},
|
||||
next_speaker: {
|
||||
type: Type.STRING,
|
||||
enum: ['user', 'model'], // Enforce the choices
|
||||
description: "Who should speak next based *only* on the preceding turn and the decision rules",
|
||||
},
|
||||
},
|
||||
required: ['reasoning', 'next_speaker']
|
||||
};
|
||||
// Schema Idea
|
||||
const responseSchema: SchemaUnion = {
|
||||
type: Type.OBJECT,
|
||||
properties: {
|
||||
reasoning: {
|
||||
type: Type.STRING,
|
||||
description:
|
||||
"Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn.",
|
||||
},
|
||||
next_speaker: {
|
||||
type: Type.STRING,
|
||||
enum: ['user', 'model'], // Enforce the choices
|
||||
description:
|
||||
'Who should speak next based *only* on the preceding turn and the decision rules',
|
||||
},
|
||||
},
|
||||
required: ['reasoning', 'next_speaker'],
|
||||
};
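// Illustrative example of a response conforming to this schema (hypothetical values, not taken from the diff):
//   { "reasoning": "My previous turn ended by asking the user a question.", "next_speaker": "user" }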
|
||||
|
||||
try {
|
||||
// Use the new generateJson method, passing the history and the check prompt
|
||||
const parsedResponse = await this.generateJson([...history, { role: "user", parts: [{ text: checkPrompt }] }], responseSchema);
|
||||
|
||||
// Safely extract the next speaker value
|
||||
const nextSpeaker: string | undefined = typeof parsedResponse?.next_speaker === 'string' ? parsedResponse.next_speaker : undefined;
|
||||
|
||||
if (nextSpeaker === 'model') {
|
||||
currentMessageToSend = { text: 'alright' }; // Or potentially a more meaningful continuation prompt
|
||||
} else {
|
||||
// 'user' should speak next, or value is missing/invalid. End the turn.
|
||||
break;
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
console.error(`[Turn ${turns}] Failed to get or parse next speaker check:`, error);
|
||||
// If the check fails, assume user should speak next to avoid infinite loops
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
console.warn(`[Turn ${turns}] No text or function calls received from Gemini. Ending interaction.`);
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (turns >= this.MAX_TURNS) {
|
||||
console.warn("sendMessageStream: Reached maximum tool call turns limit.");
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: "\n\n[System Notice: Maximum interaction turns reached. The conversation may be incomplete.]",
|
||||
};
|
||||
}
|
||||
|
||||
} catch (error: unknown) {
|
||||
if (error instanceof Error && error.name === 'AbortError') {
|
||||
console.log("Gemini stream request aborted by user.");
|
||||
throw error;
|
||||
} else {
|
||||
console.error(`Error during Gemini stream or tool interaction:`, error);
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: `\n\n[Error: An unexpected error occurred during the chat: ${message}]`,
|
||||
};
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates structured JSON content based on conversational history and a schema.
|
||||
* @param contents The conversational history (Content array) to provide context.
|
||||
* @param schema The SchemaUnion defining the desired JSON structure.
|
||||
* @returns A promise that resolves to the parsed JSON object matching the schema.
|
||||
* @throws Throws an error if the API call fails or the response is not valid JSON.
|
||||
*/
|
||||
public async generateJson(contents: Content[], schema: SchemaUnion): Promise<any> {
|
||||
try {
|
||||
const result = await this.ai.models.generateContent({
|
||||
model: 'gemini-2.0-flash', // Using flash for potentially faster structured output
|
||||
config: {
|
||||
...this.defaultHyperParameters,
|
||||
systemInstruction: CoreSystemPrompt,
|
||||
responseSchema: schema,
|
||||
responseMimeType: 'application/json',
|
||||
try {
|
||||
// Use the new generateJson method, passing the history and the check prompt
|
||||
const parsedResponse = await this.generateJson(
|
||||
[
|
||||
...history,
|
||||
{
|
||||
role: 'user',
|
||||
parts: [{ text: checkPrompt }],
|
||||
},
|
||||
contents: contents, // Pass the full Content array
|
||||
});
|
||||
],
|
||||
responseSchema,
|
||||
);
|
||||
|
||||
const responseText = result.text;
|
||||
if (!responseText) {
|
||||
throw new Error("API returned an empty response.");
|
||||
}
|
||||
// Safely extract the next speaker value
|
||||
const nextSpeaker: string | undefined =
|
||||
typeof parsedResponse?.next_speaker === 'string'
|
||||
? parsedResponse.next_speaker
|
||||
: undefined;
|
||||
|
||||
try {
|
||||
const parsedJson = JSON.parse(responseText);
|
||||
// TODO: Add schema validation if needed
|
||||
return parsedJson;
|
||||
} catch (parseError) {
|
||||
console.error("Failed to parse JSON response:", responseText);
|
||||
throw new Error(`Failed to parse API response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`);
|
||||
if (nextSpeaker === 'model') {
|
||||
currentMessageToSend = { text: 'alright' }; // Or potentially a more meaningful continuation prompt
|
||||
} else {
|
||||
// 'user' should speak next, or value is missing/invalid. End the turn.
|
||||
break;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Error generating JSON content:", error);
|
||||
const message = error instanceof Error ? error.message : "Unknown API error.";
|
||||
throw new Error(`Failed to generate JSON content: ${message}`);
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[Turn ${turns}] Failed to get or parse next speaker check:`,
|
||||
error,
|
||||
);
|
||||
// If the check fails, assume user should speak next to avoid infinite loops
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
console.warn(
|
||||
`[Turn ${turns}] No text or function calls received from Gemini. Ending interaction.`,
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (turns >= this.MAX_TURNS) {
|
||||
console.warn(
|
||||
'sendMessageStream: Reached maximum tool call turns limit.',
|
||||
);
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value:
|
||||
'\n\n[System Notice: Maximum interaction turns reached. The conversation may be incomplete.]',
|
||||
};
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
if (error instanceof Error && error.name === 'AbortError') {
|
||||
console.log('Gemini stream request aborted by user.');
|
||||
throw error;
|
||||
} else {
|
||||
console.error(`Error during Gemini stream or tool interaction:`, error);
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
yield {
|
||||
type: GeminiEventType.Content,
|
||||
value: `\n\n[Error: An unexpected error occurred during the chat: ${message}]`,
|
||||
};
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates structured JSON content based on conversational history and a schema.
|
||||
* @param contents The conversational history (Content array) to provide context.
|
||||
* @param schema The SchemaUnion defining the desired JSON structure.
|
||||
* @returns A promise that resolves to the parsed JSON object matching the schema.
|
||||
* @throws Throws an error if the API call fails or the response is not valid JSON.
|
||||
*/
|
||||
public async generateJson(
|
||||
contents: Content[],
|
||||
schema: SchemaUnion,
|
||||
): Promise<any> {
|
||||
try {
|
||||
const result = await this.ai.models.generateContent({
|
||||
model: 'gemini-2.0-flash', // Using flash for potentially faster structured output
|
||||
config: {
|
||||
...this.defaultHyperParameters,
|
||||
systemInstruction: CoreSystemPrompt,
|
||||
responseSchema: schema,
|
||||
responseMimeType: 'application/json',
|
||||
},
|
||||
contents: contents, // Pass the full Content array
|
||||
});
|
||||
|
||||
const responseText = result.text;
|
||||
if (!responseText) {
|
||||
throw new Error('API returned an empty response.');
|
||||
}
|
||||
|
||||
try {
|
||||
const parsedJson = JSON.parse(responseText);
|
||||
// TODO: Add schema validation if needed
|
||||
return parsedJson;
|
||||
} catch (parseError) {
|
||||
console.error('Failed to parse JSON response:', responseText);
|
||||
throw new Error(
|
||||
`Failed to parse API response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`,
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error generating JSON content:', error);
|
||||
const message =
|
||||
error instanceof Error ? error.message : 'Unknown API error.';
|
||||
throw new Error(`Failed to generate JSON content: ${message}`);
|
||||
}
|
||||
}
|
||||
}
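
The JSDoc earlier in this file documents `generateJson`, so a brief caller sketch may help make its contract concrete. This sketch is not part of the commit: the import path for `GeminiClient`, the example schema, and the prompt are assumptions for illustration; only the signature shown above (`contents: Content[]`, `schema: SchemaUnion`, resolving to the parsed object) is relied on.

```ts
import { Type, SchemaUnion, Content } from '@google/genai';
import { GeminiClient } from './gemini-client.js'; // assumed path for illustration

async function askForJson(): Promise<void> {
  const client = new GeminiClient(); // requires GEMINI_API_KEY, resolved via getApiKey()

  // A tiny schema in the same style as the next-speaker schema used above.
  const schema: SchemaUnion = {
    type: Type.OBJECT,
    properties: {
      answer: { type: Type.STRING, description: 'A short answer.' },
    },
    required: ['answer'],
  };

  const contents: Content[] = [
    { role: 'user', parts: [{ text: 'Answer in JSON: what is 2 + 2?' }] },
  ];

  // Resolves to the parsed JSON object, or throws if the call or parsing fails.
  const parsed = await client.generateJson(contents, schema);
  console.log(parsed.answer);
}

askForJson().catch(console.error);
```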

@@ -1,167 +1,175 @@
import { ToolCallEvent } from '../ui/types.js';
import { Part } from '@google/genai';
import { HistoryItem } from '../ui/types.js';
import {
  handleToolCallChunk,
  addErrorMessageToHistory,
} from './history-updater.js';

export enum GeminiEventType {
  Content,
  ToolCallInfo,
}

export interface GeminiContentEvent {
  type: GeminiEventType.Content;
  value: string;
}

export interface GeminiToolCallInfoEvent {
  type: GeminiEventType.ToolCallInfo;
  value: ToolCallEvent;
}

export type GeminiEvent = GeminiContentEvent | GeminiToolCallInfoEvent;

export type GeminiStream = AsyncIterable<GeminiEvent>;

export enum StreamingState {
  Idle,
  Responding,
}

interface StreamProcessorParams {
  stream: GeminiStream;
  signal: AbortSignal;
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>;
  submitQuery: (query: Part) => Promise<void>;
  getNextMessageId: () => number;
  addHistoryItem: (itemData: Omit<HistoryItem, 'id'>, id: number) => void;
  currentToolGroupIdRef: React.MutableRefObject<number | null>;
}

/**
|
||||
* Processes the Gemini stream, managing text buffering, adaptive rendering,
|
||||
* and delegating history updates for tool calls and errors.
|
||||
*/
|
||||
export const processGeminiStream = async ({ // Renamed function for clarity
|
||||
stream,
|
||||
signal,
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId,
|
||||
addHistoryItem,
|
||||
currentToolGroupIdRef,
|
||||
export const processGeminiStream = async ({
|
||||
// Renamed function for clarity
|
||||
stream,
|
||||
signal,
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId,
|
||||
addHistoryItem,
|
||||
currentToolGroupIdRef,
|
||||
}: StreamProcessorParams): Promise<void> => {
|
||||
// --- State specific to this stream processing invocation ---
|
||||
let textBuffer = '';
|
||||
let renderTimeoutId: NodeJS.Timeout | null = null;
|
||||
let isStreamComplete = false;
|
||||
let currentGeminiMessageId: number | null = null;
|
||||
// --- State specific to this stream processing invocation ---
|
||||
let textBuffer = '';
|
||||
let renderTimeoutId: NodeJS.Timeout | null = null;
|
||||
let isStreamComplete = false;
|
||||
let currentGeminiMessageId: number | null = null;
|
||||
|
||||
const render = (content: string) => {
|
||||
if (currentGeminiMessageId === null) {
|
||||
return;
|
||||
}
|
||||
setHistory((prev) =>
|
||||
prev.map((item) =>
|
||||
item.id === currentGeminiMessageId && item.type === 'gemini'
|
||||
? { ...item, text: (item.text ?? '') + content }
|
||||
: item,
|
||||
),
|
||||
);
|
||||
};
|
||||
// --- Adaptive Rendering Logic (nested) ---
|
||||
const renderBufferedText = () => {
|
||||
if (signal.aborted) {
|
||||
if (renderTimeoutId) clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
return;
|
||||
}
|
||||
|
||||
const bufferLength = textBuffer.length;
|
||||
let chunkSize = 0;
|
||||
let delay = 50;
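// Adaptive pacing: the larger the backlog, the bigger the chunk and the shorter the delay (thresholds below).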
|
||||
|
||||
if (bufferLength > 150) {
|
||||
chunkSize = Math.min(bufferLength, 30);
|
||||
delay = 5;
|
||||
} else if (bufferLength > 30) {
|
||||
chunkSize = Math.min(bufferLength, 10);
|
||||
delay = 10;
|
||||
} else if (bufferLength > 0) {
|
||||
chunkSize = 2;
|
||||
delay = 20;
|
||||
}
|
||||
|
||||
if (chunkSize > 0) {
|
||||
const chunkToRender = textBuffer.substring(0, chunkSize);
|
||||
textBuffer = textBuffer.substring(chunkSize);
|
||||
render(chunkToRender);
|
||||
|
||||
renderTimeoutId = setTimeout(renderBufferedText, delay);
|
||||
} else {
|
||||
renderTimeoutId = null; // Clear timeout ID if nothing to render
|
||||
if (!isStreamComplete) {
|
||||
// Buffer empty, but stream might still send data, check again later
|
||||
renderTimeoutId = setTimeout(renderBufferedText, 50);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const scheduleRender = () => {
|
||||
if (renderTimeoutId === null) {
|
||||
renderTimeoutId = setTimeout(renderBufferedText, 0);
|
||||
}
|
||||
};
|
||||
|
||||
// --- Stream Processing Loop ---
|
||||
try {
|
||||
for await (const chunk of stream) {
|
||||
if (signal.aborted) break;
|
||||
|
||||
if (chunk.type === GeminiEventType.Content) {
|
||||
currentToolGroupIdRef.current = null; // Reset tool group on text
|
||||
|
||||
const render = (content: string) => {
|
||||
if (currentGeminiMessageId === null) {
|
||||
return;
|
||||
currentGeminiMessageId = getNextMessageId();
|
||||
addHistoryItem({ type: 'gemini', text: '' }, currentGeminiMessageId);
|
||||
textBuffer = '';
|
||||
}
|
||||
setHistory(prev => prev.map(item =>
|
||||
item.id === currentGeminiMessageId && item.type === 'gemini'
|
||||
? { ...item, text: (item.text ?? '') + content }
|
||||
: item
|
||||
));
|
||||
}
|
||||
// --- Adaptive Rendering Logic (nested) ---
|
||||
const renderBufferedText = () => {
|
||||
if (signal.aborted) {
|
||||
if (renderTimeoutId) clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
return;
|
||||
}
|
||||
|
||||
const bufferLength = textBuffer.length;
|
||||
let chunkSize = 0;
|
||||
let delay = 50;
|
||||
|
||||
if (bufferLength > 150) {
|
||||
chunkSize = Math.min(bufferLength, 30); delay = 5;
|
||||
} else if (bufferLength > 30) {
|
||||
chunkSize = Math.min(bufferLength, 10); delay = 10;
|
||||
} else if (bufferLength > 0) {
|
||||
chunkSize = 2; delay = 20;
|
||||
}
|
||||
|
||||
if (chunkSize > 0) {
|
||||
const chunkToRender = textBuffer.substring(0, chunkSize);
|
||||
textBuffer = textBuffer.substring(chunkSize);
|
||||
render(chunkToRender);
|
||||
|
||||
renderTimeoutId = setTimeout(renderBufferedText, delay);
|
||||
} else {
|
||||
renderTimeoutId = null; // Clear timeout ID if nothing to render
|
||||
if (!isStreamComplete) {
|
||||
// Buffer empty, but stream might still send data, check again later
|
||||
renderTimeoutId = setTimeout(renderBufferedText, 50);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const scheduleRender = () => {
|
||||
if (renderTimeoutId === null) {
|
||||
renderTimeoutId = setTimeout(renderBufferedText, 0);
|
||||
}
|
||||
};
|
||||
|
||||
// --- Stream Processing Loop ---
|
||||
try {
|
||||
for await (const chunk of stream) {
|
||||
if (signal.aborted) break;
|
||||
|
||||
if (chunk.type === GeminiEventType.Content) {
|
||||
currentToolGroupIdRef.current = null; // Reset tool group on text
|
||||
|
||||
if (currentGeminiMessageId === null) {
|
||||
currentGeminiMessageId = getNextMessageId();
|
||||
addHistoryItem({ type: 'gemini', text: '' }, currentGeminiMessageId);
|
||||
textBuffer = '';
|
||||
}
|
||||
textBuffer += chunk.value;
|
||||
scheduleRender();
|
||||
|
||||
} else if (chunk.type === GeminiEventType.ToolCallInfo) {
|
||||
if (renderTimeoutId) { // Stop rendering loop
|
||||
clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
}
|
||||
// Flush any text buffer content.
|
||||
render(textBuffer);
|
||||
currentGeminiMessageId = null; // End text message context
|
||||
textBuffer = ''; // Clear buffer
|
||||
|
||||
// Delegate history update for tool call
|
||||
handleToolCallChunk(
|
||||
chunk.value,
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId,
|
||||
currentToolGroupIdRef
|
||||
);
|
||||
}
|
||||
}
|
||||
if (signal.aborted) {
|
||||
throw new Error("Request cancelled by user");
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (renderTimeoutId) { // Ensure render loop stops on error
|
||||
clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
}
|
||||
// Delegate history update for error message
|
||||
addErrorMessageToHistory(error, setHistory, getNextMessageId);
|
||||
} finally {
|
||||
isStreamComplete = true; // Signal stream end for render loop completion
|
||||
textBuffer += chunk.value;
|
||||
scheduleRender();
|
||||
} else if (chunk.type === GeminiEventType.ToolCallInfo) {
|
||||
if (renderTimeoutId) {
|
||||
clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
// Stop rendering loop
|
||||
clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
}
|
||||
// Flush any text buffer content.
|
||||
render(textBuffer);
|
||||
currentGeminiMessageId = null; // End text message context
|
||||
textBuffer = ''; // Clear buffer
|
||||
|
||||
renderBufferedText(); // Force final render
|
||||
// Delegate history update for tool call
|
||||
handleToolCallChunk(
|
||||
chunk.value,
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId,
|
||||
currentToolGroupIdRef,
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
if (signal.aborted) {
|
||||
throw new Error('Request cancelled by user');
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (renderTimeoutId) {
|
||||
// Ensure render loop stops on error
|
||||
clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
}
|
||||
// Delegate history update for error message
|
||||
addErrorMessageToHistory(error, setHistory, getNextMessageId);
|
||||
} finally {
|
||||
isStreamComplete = true; // Signal stream end for render loop completion
|
||||
if (renderTimeoutId) {
|
||||
clearTimeout(renderTimeoutId);
|
||||
renderTimeoutId = null;
|
||||
}
|
||||
|
||||
renderBufferedText(); // Force final render
|
||||
}
|
||||
};
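
As a reading aid for the event union defined earlier in this file, here is a minimal consumer sketch. It is not part of this commit: the import path is an assumption, and the actual CLI consumes the stream through `processGeminiStream` above rather than a plain logger.

```ts
import process from 'node:process';
import { GeminiEventType, type GeminiStream } from './gemini-stream.js'; // assumed path for illustration

async function logStream(stream: GeminiStream): Promise<void> {
  for await (const event of stream) {
    if (event.type === GeminiEventType.Content) {
      // Plain text chunk from the model.
      process.stdout.write(event.value);
    } else if (event.type === GeminiEventType.ToolCallInfo) {
      // Tool call metadata (ToolCallEvent): name, args, status, etc.
      console.log(`\n[tool call] ${event.value.name} (status: ${event.value.status})`);
    }
  }
}
```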

@@ -1,7 +1,15 @@
import { Part } from '@google/genai';
import { toolRegistry } from '../tools/tool-registry.js';
import {
  HistoryItem,
  IndividualToolCallDisplay,
  ToolCallEvent,
  ToolCallStatus,
  ToolConfirmationOutcome,
  ToolEditConfirmationDetails,
  ToolExecuteConfirmationDetails,
} from '../ui/types.js';
import { ToolResultDisplay } from '../tools/tools.js';

/**
 * Processes a tool call chunk and updates the history state accordingly.
@@ -9,114 +17,160 @@ import { ToolResultDisplay } from "../tools/tools.js";
|
|||
* Resides here as its primary effect is updating history based on tool events.
|
||||
*/
|
||||
export const handleToolCallChunk = (
|
||||
chunk: ToolCallEvent,
|
||||
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
|
||||
submitQuery: (query: Part) => Promise<void>,
|
||||
getNextMessageId: () => number,
|
||||
currentToolGroupIdRef: React.MutableRefObject<number | null>
|
||||
chunk: ToolCallEvent,
|
||||
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
|
||||
submitQuery: (query: Part) => Promise<void>,
|
||||
getNextMessageId: () => number,
|
||||
currentToolGroupIdRef: React.MutableRefObject<number | null>,
|
||||
): void => {
|
||||
const toolDefinition = toolRegistry.getTool(chunk.name);
|
||||
const description = toolDefinition?.getDescription
|
||||
? toolDefinition.getDescription(chunk.args)
|
||||
: '';
|
||||
const toolDisplayName = toolDefinition?.displayName ?? chunk.name;
|
||||
let confirmationDetails = chunk.confirmationDetails;
|
||||
if (confirmationDetails) {
|
||||
const originalConfirmationDetails = confirmationDetails;
|
||||
const historyUpdatingConfirm = async (outcome: ToolConfirmationOutcome) => {
|
||||
originalConfirmationDetails.onConfirm(outcome);
|
||||
const toolDefinition = toolRegistry.getTool(chunk.name);
|
||||
const description = toolDefinition?.getDescription
|
||||
? toolDefinition.getDescription(chunk.args)
|
||||
: '';
|
||||
const toolDisplayName = toolDefinition?.displayName ?? chunk.name;
|
||||
let confirmationDetails = chunk.confirmationDetails;
|
||||
if (confirmationDetails) {
|
||||
const originalConfirmationDetails = confirmationDetails;
|
||||
const historyUpdatingConfirm = async (outcome: ToolConfirmationOutcome) => {
|
||||
originalConfirmationDetails.onConfirm(outcome);
|
||||
|
||||
if (outcome === ToolConfirmationOutcome.Cancel) {
|
||||
let resultDisplay: ToolResultDisplay | undefined;
|
||||
if ('fileDiff' in originalConfirmationDetails) {
|
||||
resultDisplay = { fileDiff: (originalConfirmationDetails as ToolEditConfirmationDetails).fileDiff };
|
||||
} else {
|
||||
resultDisplay = `~~${(originalConfirmationDetails as ToolExecuteConfirmationDetails).command}~~`;
|
||||
}
|
||||
handleToolCallChunk({ ...chunk, status: ToolCallStatus.Canceled, confirmationDetails: undefined, resultDisplay, }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
|
||||
const functionResponse: Part = {
|
||||
functionResponse: {
|
||||
name: chunk.name,
|
||||
response: { "error": "User rejected function call." },
|
||||
},
|
||||
}
|
||||
await submitQuery(functionResponse);
|
||||
} else {
|
||||
const tool = toolRegistry.getTool(chunk.name)
|
||||
if (!tool) {
|
||||
throw new Error(`Tool "${chunk.name}" not found or is not registered.`);
|
||||
}
|
||||
handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: "Executing...", confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
|
||||
const result = await tool.execute(chunk.args);
|
||||
handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: result.returnDisplay, confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
|
||||
const functionResponse: Part = {
|
||||
functionResponse: {
|
||||
name: chunk.name,
|
||||
id: chunk.callId,
|
||||
response: { "output": result.llmContent },
|
||||
},
|
||||
}
|
||||
await submitQuery(functionResponse);
|
||||
}
|
||||
if (outcome === ToolConfirmationOutcome.Cancel) {
|
||||
let resultDisplay: ToolResultDisplay | undefined;
|
||||
if ('fileDiff' in originalConfirmationDetails) {
|
||||
resultDisplay = {
|
||||
fileDiff: (
|
||||
originalConfirmationDetails as ToolEditConfirmationDetails
|
||||
).fileDiff,
|
||||
};
|
||||
} else {
|
||||
resultDisplay = `~~${(originalConfirmationDetails as ToolExecuteConfirmationDetails).command}~~`;
|
||||
}
|
||||
|
||||
confirmationDetails = {
|
||||
...originalConfirmationDetails,
|
||||
onConfirm: historyUpdatingConfirm,
|
||||
handleToolCallChunk(
|
||||
{
|
||||
...chunk,
|
||||
status: ToolCallStatus.Canceled,
|
||||
confirmationDetails: undefined,
|
||||
resultDisplay,
|
||||
},
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId,
|
||||
currentToolGroupIdRef,
|
||||
);
|
||||
const functionResponse: Part = {
|
||||
functionResponse: {
|
||||
name: chunk.name,
|
||||
response: { error: 'User rejected function call.' },
|
||||
},
|
||||
};
|
||||
}
|
||||
const toolDetail: IndividualToolCallDisplay = {
|
||||
callId: chunk.callId,
|
||||
name: toolDisplayName,
|
||||
description,
|
||||
resultDisplay: chunk.resultDisplay,
|
||||
status: chunk.status,
|
||||
confirmationDetails: confirmationDetails,
|
||||
await submitQuery(functionResponse);
|
||||
} else {
|
||||
const tool = toolRegistry.getTool(chunk.name);
|
||||
if (!tool) {
|
||||
throw new Error(
|
||||
`Tool "${chunk.name}" not found or is not registered.`,
|
||||
);
|
||||
}
|
||||
handleToolCallChunk(
|
||||
{
|
||||
...chunk,
|
||||
status: ToolCallStatus.Invoked,
|
||||
resultDisplay: 'Executing...',
|
||||
confirmationDetails: undefined,
|
||||
},
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId,
|
||||
currentToolGroupIdRef,
|
||||
);
|
||||
const result = await tool.execute(chunk.args);
|
||||
handleToolCallChunk(
|
||||
{
|
||||
...chunk,
|
||||
status: ToolCallStatus.Invoked,
|
||||
resultDisplay: result.returnDisplay,
|
||||
confirmationDetails: undefined,
|
||||
},
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId,
|
||||
currentToolGroupIdRef,
|
||||
);
|
||||
const functionResponse: Part = {
|
||||
functionResponse: {
|
||||
name: chunk.name,
|
||||
id: chunk.callId,
|
||||
response: { output: result.llmContent },
|
||||
},
|
||||
};
|
||||
await submitQuery(functionResponse);
|
||||
}
|
||||
};
|
||||
|
||||
const activeGroupId = currentToolGroupIdRef.current;
|
||||
setHistory(prev => {
|
||||
if (chunk.status === ToolCallStatus.Pending) {
|
||||
if (activeGroupId === null) {
|
||||
// Start a new tool group
|
||||
const newGroupId = getNextMessageId();
|
||||
currentToolGroupIdRef.current = newGroupId;
|
||||
return [
|
||||
...prev,
|
||||
{ id: newGroupId, type: 'tool_group', tools: [toolDetail] } as HistoryItem
|
||||
];
|
||||
}
|
||||
confirmationDetails = {
|
||||
...originalConfirmationDetails,
|
||||
onConfirm: historyUpdatingConfirm,
|
||||
};
|
||||
}
|
||||
const toolDetail: IndividualToolCallDisplay = {
|
||||
callId: chunk.callId,
|
||||
name: toolDisplayName,
|
||||
description,
|
||||
resultDisplay: chunk.resultDisplay,
|
||||
status: chunk.status,
|
||||
confirmationDetails: confirmationDetails,
|
||||
};
|
||||
|
||||
// Add to existing tool group
|
||||
return prev.map(item =>
|
||||
item.id === activeGroupId && item.type === 'tool_group'
|
||||
? item.tools.some(t => t.callId === toolDetail.callId)
|
||||
? item // Tool already listed as pending
|
||||
: { ...item, tools: [...item.tools, toolDetail] }
|
||||
: item
|
||||
);
|
||||
}
|
||||
const activeGroupId = currentToolGroupIdRef.current;
|
||||
setHistory((prev) => {
|
||||
if (chunk.status === ToolCallStatus.Pending) {
|
||||
if (activeGroupId === null) {
|
||||
// Start a new tool group
|
||||
const newGroupId = getNextMessageId();
|
||||
currentToolGroupIdRef.current = newGroupId;
|
||||
return [
|
||||
...prev,
|
||||
{
|
||||
id: newGroupId,
|
||||
type: 'tool_group',
|
||||
tools: [toolDetail],
|
||||
} as HistoryItem,
|
||||
];
|
||||
}
|
||||
|
||||
// Update the status of a pending tool within the active group
|
||||
if (activeGroupId === null) {
|
||||
// Log if an invoked tool arrives without an active group context
|
||||
console.warn("Received invoked tool status without an active tool group ID:", chunk);
|
||||
return prev;
|
||||
}
|
||||
// Add to existing tool group
|
||||
return prev.map((item) =>
|
||||
item.id === activeGroupId && item.type === 'tool_group'
|
||||
? item.tools.some((t) => t.callId === toolDetail.callId)
|
||||
? item // Tool already listed as pending
|
||||
: { ...item, tools: [...item.tools, toolDetail] }
|
||||
: item,
|
||||
);
|
||||
}
|
||||
|
||||
return prev.map(item =>
|
||||
item.id === activeGroupId && item.type === 'tool_group'
|
||||
? {
|
||||
...item,
|
||||
tools: item.tools.map(t =>
|
||||
t.callId === toolDetail.callId
|
||||
? { ...t, ...toolDetail, status: chunk.status } // Update details & status
|
||||
: t
|
||||
)
|
||||
}
|
||||
: item
|
||||
);
|
||||
});
|
||||
// Update the status of a pending tool within the active group
|
||||
if (activeGroupId === null) {
|
||||
// Log if an invoked tool arrives without an active group context
|
||||
console.warn(
|
||||
'Received invoked tool status without an active tool group ID:',
|
||||
chunk,
|
||||
);
|
||||
return prev;
|
||||
}
|
||||
|
||||
return prev.map((item) =>
|
||||
item.id === activeGroupId && item.type === 'tool_group'
|
||||
? {
|
||||
...item,
|
||||
tools: item.tools.map((t) =>
|
||||
t.callId === toolDetail.callId
|
||||
? { ...t, ...toolDetail, status: chunk.status } // Update details & status
|
||||
: t,
|
||||
),
|
||||
}
|
||||
: item,
|
||||
);
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -124,45 +178,58 @@ export const handleToolCallChunk = (
|
|||
* it to the last non-user message or creating a new entry.
|
||||
*/
|
||||
export const addErrorMessageToHistory = (
  error: any,
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
  getNextMessageId: () => number
  error: any,
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
  getNextMessageId: () => number,
): void => {
  const isAbort = error.name === 'AbortError';
  const errorType = isAbort ? 'info' : 'error';
  const errorText = isAbort
    ? '[Request cancelled by user]'
    : `[Error: ${error.message || 'Unknown error'}]`;
  const isAbort = error.name === 'AbortError';
  const errorType = isAbort ? 'info' : 'error';
  const errorText = isAbort
    ? '[Request cancelled by user]'
    : `[Error: ${error.message || 'Unknown error'}]`;

  setHistory(prev => {
    const reversedHistory = [...prev].reverse();
    // Find the last message that isn't from the user to append the error/info to
    const lastBotMessageIndex = reversedHistory.findIndex(item => item.type !== 'user');
    const originalIndex = lastBotMessageIndex !== -1 ? prev.length - 1 - lastBotMessageIndex : -1;
  setHistory((prev) => {
    const reversedHistory = [...prev].reverse();
    // Find the last message that isn't from the user to append the error/info to
    const lastBotMessageIndex = reversedHistory.findIndex(
      (item) => item.type !== 'user',
    );
    const originalIndex =
      lastBotMessageIndex !== -1 ? prev.length - 1 - lastBotMessageIndex : -1;

    if (originalIndex !== -1) {
      // Append error to the last relevant message
      return prev.map((item, index) => {
        if (index === originalIndex) {
          let baseText = '';
          // Determine base text based on item type
          if (item.type === 'gemini') baseText = item.text ?? '';
          else if (item.type === 'tool_group') baseText = `Tool execution (${item.tools.length} calls)`;
          else if (item.type === 'error' || item.type === 'info') baseText = item.text ?? '';
          // Safely handle potential undefined text
    if (originalIndex !== -1) {
      // Append error to the last relevant message
      return prev.map((item, index) => {
        if (index === originalIndex) {
          let baseText = '';
          // Determine base text based on item type
          if (item.type === 'gemini') baseText = item.text ?? '';
          else if (item.type === 'tool_group')
            baseText = `Tool execution (${item.tools.length} calls)`;
          else if (item.type === 'error' || item.type === 'info')
            baseText = item.text ?? '';
          // Safely handle potential undefined text

          const updatedText = (baseText + (baseText && !baseText.endsWith('\n') ? '\n' : '') + errorText).trim();
          // Reuse existing ID, update type and text
          return { ...item, type: errorType, text: updatedText };
        }
        return item;
      });
    } else {
      // No previous message to append to, add a new error item
      return [
        ...prev,
        { id: getNextMessageId(), type: errorType, text: errorText } as HistoryItem
      ];
          const updatedText = (
            baseText +
            (baseText && !baseText.endsWith('\n') ? '\n' : '') +
            errorText
          ).trim();
          // Reuse existing ID, update type and text
          return { ...item, type: errorType, text: updatedText };
        }
      });
    };
        return item;
      });
    } else {
      // No previous message to append to, add a new error item
      return [
        ...prev,
        {
          id: getNextMessageId(),
          type: errorType,
          text: errorText,
        } as HistoryItem,
      ];
    }
  });
};
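The classification rule used by `addErrorMessageToHistory` can be illustrated on its own: an `AbortError` becomes an informational cancellation entry, anything else becomes an error entry. A standalone sketch, not the exported helper itself:

```ts
function classifyError(error: unknown): { type: 'info' | 'error'; text: string } {
  const isAbort = error instanceof Error && error.name === 'AbortError';
  if (isAbort) {
    return { type: 'info', text: '[Request cancelled by user]' };
  }
  const message = error instanceof Error ? error.message : '';
  return { type: 'error', text: `[Error: ${message || 'Unknown error'}]` };
}

const cancelled = new Error('cancelled by user');
cancelled.name = 'AbortError';
console.log(classifyError(cancelled)); // { type: 'info', text: '[Request cancelled by user]' }
console.log(classifyError(new Error('got status: 429'))); // { type: 'error', text: '[Error: got status: 429]' }
```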
@@ -1,5 +1,5 @@
import { ReadFileTool } from "../tools/read-file.tool.js";
import { TerminalTool } from "../tools/terminal.tool.js";
import { ReadFileTool } from '../tools/read-file.tool.js';
import { TerminalTool } from '../tools/terminal.tool.js';

const MEMORY_FILE_NAME = 'GEMINI.md';

@@ -91,4 +91,4 @@ assistant: I can run \`rm -rf ./temp\`. This will permanently delete the directo

# Final Reminder
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions on the contents of files; instead use the ${ReadFileTool.Name} to ensure you aren't making too broad of assumptions.
`;
`;
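The prompt text above lives in a template literal, so tool names are interpolated from static `Name` properties rather than hard-coded. A small sketch of that pattern; the class below and the `'read_file'` value are illustrative assumptions, not the project's actual definitions:

```ts
class ReadFileToolExample {
  static Name = 'read_file';
}

const SYSTEM_PROMPT = `
# Final Reminder
Never make assumptions on the contents of files; instead use the ${ReadFileToolExample.Name} tool to read them first.
`;

console.log(SYSTEM_PROMPT.includes(ReadFileToolExample.Name)); // true
```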
@@ -14,77 +14,78 @@ import { TerminalTool } from './tools/terminal.tool.js';
|
|||
import { WriteFileTool } from './tools/write-file.tool.js';
|
||||
|
||||
async function main() {
|
||||
// 1. Configuration
|
||||
loadEnvironment();
|
||||
const argv = await parseArguments(); // Ensure args.ts imports printWarning from ui/display
|
||||
const targetDir = getTargetDirectory(argv.target_dir);
|
||||
// 1. Configuration
|
||||
loadEnvironment();
|
||||
const argv = await parseArguments(); // Ensure args.ts imports printWarning from ui/display
|
||||
const targetDir = getTargetDirectory(argv.target_dir);
|
||||
|
||||
// 2. Configure tools
|
||||
registerTools(targetDir);
|
||||
// 2. Configure tools
|
||||
registerTools(targetDir);
|
||||
|
||||
// 3. Render UI
|
||||
render(React.createElement(App, { directory: targetDir }));
|
||||
// 3. Render UI
|
||||
render(React.createElement(App, { directory: targetDir }));
|
||||
}
|
||||
|
||||
// --- Global Unhandled Rejection Handler ---
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
// Check if this is the known 429 ClientError that sometimes escapes
|
||||
// this is a workaround for a specific issue with the way we are calling gemini
|
||||
// where a 429 error is thrown but not caught, causing an unhandled rejection
|
||||
// TODO(adh): Remove this when the race condition is fixed
|
||||
const isKnownEscaped429 =
|
||||
reason instanceof Error &&
|
||||
reason.name === 'ClientError' &&
|
||||
reason.message.includes('got status: 429');
|
||||
// Check if this is the known 429 ClientError that sometimes escapes
|
||||
// this is a workaround for a specific issue with the way we are calling gemini
|
||||
// where a 429 error is thrown but not caught, causing an unhandled rejection
|
||||
// TODO(adh): Remove this when the race condition is fixed
|
||||
const isKnownEscaped429 =
|
||||
reason instanceof Error &&
|
||||
reason.name === 'ClientError' &&
|
||||
reason.message.includes('got status: 429');
|
||||
|
||||
if (isKnownEscaped429) {
|
||||
// Log it differently and DON'T exit, as it's likely already handled visually
|
||||
console.warn('-----------------------------------------');
|
||||
console.warn('WORKAROUND: Suppressed known escaped 429 Unhandled Rejection.');
|
||||
console.warn('-----------------------------------------');
|
||||
console.warn('Reason:', reason);
|
||||
// No process.exit(1);
|
||||
} else {
|
||||
// Log other unexpected unhandled rejections as critical errors
|
||||
console.error('=========================================');
|
||||
console.error('CRITICAL: Unhandled Promise Rejection!');
|
||||
console.error('=========================================');
|
||||
console.error('Reason:', reason);
|
||||
console.error('Stack trace may follow:');
|
||||
if (!(reason instanceof Error)) {
|
||||
console.error(reason);
|
||||
}
|
||||
// Exit for genuinely unhandled errors
|
||||
process.exit(1);
|
||||
if (isKnownEscaped429) {
|
||||
// Log it differently and DON'T exit, as it's likely already handled visually
|
||||
console.warn('-----------------------------------------');
|
||||
console.warn(
|
||||
'WORKAROUND: Suppressed known escaped 429 Unhandled Rejection.',
|
||||
);
|
||||
console.warn('-----------------------------------------');
|
||||
console.warn('Reason:', reason);
|
||||
// No process.exit(1);
|
||||
} else {
|
||||
// Log other unexpected unhandled rejections as critical errors
|
||||
console.error('=========================================');
|
||||
console.error('CRITICAL: Unhandled Promise Rejection!');
|
||||
console.error('=========================================');
|
||||
console.error('Reason:', reason);
|
||||
console.error('Stack trace may follow:');
|
||||
if (!(reason instanceof Error)) {
|
||||
console.error(reason);
|
||||
}
|
||||
// Exit for genuinely unhandled errors
|
||||
process.exit(1);
|
||||
}
|
||||
});
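The suppression above hinges on one predicate over the rejection reason. A standalone sketch of that check; the `ClientError` name and the `got status: 429` text come from the handler above, while the helper name is an assumption:

```ts
function isKnownEscaped429(reason: unknown): boolean {
  return (
    reason instanceof Error &&
    reason.name === 'ClientError' &&
    reason.message.includes('got status: 429')
  );
}

const escaped = new Error('got status: 429 Too Many Requests');
escaped.name = 'ClientError';
console.log(isKnownEscaped429(escaped)); // true: logged as a warning, process keeps running
console.log(isKnownEscaped429(new Error('boom'))); // false: treated as critical, process exits
```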
|
||||
|
||||
// --- Global Entry Point ---
|
||||
main().catch((error) => {
|
||||
console.error('An unexpected critical error occurred:');
|
||||
if (error instanceof Error) {
|
||||
console.error(error.message);
|
||||
} else {
|
||||
console.error(String(error));
|
||||
}
|
||||
process.exit(1);
|
||||
console.error('An unexpected critical error occurred:');
|
||||
if (error instanceof Error) {
|
||||
console.error(error.message);
|
||||
} else {
|
||||
console.error(String(error));
|
||||
}
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
function registerTools(targetDir: string) {
|
||||
const lsTool = new LSTool(targetDir);
|
||||
const readFileTool = new ReadFileTool(targetDir);
|
||||
const grepTool = new GrepTool(targetDir);
|
||||
const globTool = new GlobTool(targetDir);
|
||||
const editTool = new EditTool(targetDir);
|
||||
const terminalTool = new TerminalTool(targetDir);
|
||||
const writeFileTool = new WriteFileTool(targetDir);
|
||||
const lsTool = new LSTool(targetDir);
|
||||
const readFileTool = new ReadFileTool(targetDir);
|
||||
const grepTool = new GrepTool(targetDir);
|
||||
const globTool = new GlobTool(targetDir);
|
||||
const editTool = new EditTool(targetDir);
|
||||
const terminalTool = new TerminalTool(targetDir);
|
||||
const writeFileTool = new WriteFileTool(targetDir);
|
||||
|
||||
toolRegistry.registerTool(lsTool);
|
||||
toolRegistry.registerTool(readFileTool);
|
||||
toolRegistry.registerTool(grepTool);
|
||||
toolRegistry.registerTool(globTool);
|
||||
toolRegistry.registerTool(editTool);
|
||||
toolRegistry.registerTool(terminalTool);
|
||||
toolRegistry.registerTool(writeFileTool);
|
||||
toolRegistry.registerTool(lsTool);
|
||||
toolRegistry.registerTool(readFileTool);
|
||||
toolRegistry.registerTool(grepTool);
|
||||
toolRegistry.registerTool(globTool);
|
||||
toolRegistry.registerTool(editTool);
|
||||
toolRegistry.registerTool(terminalTool);
|
||||
toolRegistry.registerTool(writeFileTool);
|
||||
}
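`registerTools` follows a simple construct-then-register pattern against a shared `toolRegistry`. Below is a minimal sketch of such a registry, assuming a map keyed by tool name; the real `ToolRegistry` shape is not shown in this diff:

```ts
interface Tool {
  name: string;
}

class ToolRegistry {
  private tools = new Map<string, Tool>();

  registerTool(tool: Tool): void {
    this.tools.set(tool.name, tool);
  }

  getTool(name: string): Tool | undefined {
    return this.tools.get(name);
  }
}

const registry = new ToolRegistry();
registry.registerTool({ name: 'ls' });
registry.registerTool({ name: 'replace' }); // 'replace' is the EditTool's registered name in this diff
console.log(registry.getTool('replace')?.name); // 'replace'
```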
|
||||
|
||||
|
|
|
@@ -3,7 +3,11 @@ import path from 'path';
|
|||
import * as Diff from 'diff';
|
||||
import { SchemaValidator } from '../utils/schemaValidator.js';
|
||||
import { BaseTool, ToolResult } from './tools.js';
|
||||
import { ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolEditConfirmationDetails } from '../ui/types.js';
|
||||
import {
|
||||
ToolCallConfirmationDetails,
|
||||
ToolConfirmationOutcome,
|
||||
ToolEditConfirmationDetails,
|
||||
} from '../ui/types.js';
|
||||
import { makeRelative, shortenPath } from '../utils/paths.js';
|
||||
import { ReadFileTool } from './read-file.tool.js';
|
||||
import { WriteFileTool } from './write-file.tool.js';
|
||||
|
@@ -12,39 +16,38 @@ import { WriteFileTool } from './write-file.tool.js';
|
|||
* Parameters for the Edit tool
|
||||
*/
|
||||
export interface EditToolParams {
|
||||
/**
|
||||
* The absolute path to the file to modify
|
||||
*/
|
||||
file_path: string;
|
||||
/**
|
||||
* The absolute path to the file to modify
|
||||
*/
|
||||
file_path: string;
|
||||
|
||||
/**
|
||||
* The text to replace
|
||||
*/
|
||||
old_string: string;
|
||||
/**
|
||||
* The text to replace
|
||||
*/
|
||||
old_string: string;
|
||||
|
||||
/**
|
||||
* The text to replace it with
|
||||
*/
|
||||
new_string: string;
|
||||
/**
|
||||
* The text to replace it with
|
||||
*/
|
||||
new_string: string;
|
||||
|
||||
/**
|
||||
* The expected number of replacements to perform (optional, defaults to 1)
|
||||
*/
|
||||
expected_replacements?: number;
|
||||
/**
|
||||
* The expected number of replacements to perform (optional, defaults to 1)
|
||||
*/
|
||||
expected_replacements?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Result from the Edit tool
|
||||
*/
|
||||
export interface EditToolResult extends ToolResult {
|
||||
}
|
||||
export interface EditToolResult extends ToolResult {}
|
||||
|
||||
interface CalculatedEdit {
|
||||
currentContent: string | null;
|
||||
newContent: string;
|
||||
occurrences: number;
|
||||
error?: { display: string, raw: string };
|
||||
isNewFile: boolean;
|
||||
currentContent: string | null;
|
||||
newContent: string;
|
||||
occurrences: number;
|
||||
error?: { display: string; raw: string };
|
||||
isNewFile: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -52,317 +55,350 @@ interface CalculatedEdit {
|
|||
* This tool maintains state for the "Always Edit" confirmation preference.
|
||||
*/
|
||||
export class EditTool extends BaseTool<EditToolParams, EditToolResult> {
|
||||
private shouldAlwaysEdit = false;
|
||||
private readonly rootDirectory: string;
|
||||
private shouldAlwaysEdit = false;
|
||||
private readonly rootDirectory: string;
|
||||
|
||||
/**
|
||||
* Creates a new instance of the EditTool
|
||||
* @param rootDirectory Root directory to ground this tool in.
|
||||
*/
|
||||
constructor(rootDirectory: string) {
|
||||
super(
|
||||
'replace',
|
||||
'Edit',
|
||||
`Replaces a SINGLE, UNIQUE occurrence of text within a file. Requires providing significant context around the change to ensure uniqueness. For moving/renaming files, use the Bash tool with \`mv\`. For replacing entire file contents or creating new files use the ${WriteFileTool.Name} tool. Always use the ${ReadFileTool.Name} tool to examine the file before using this tool.`,
|
||||
{
|
||||
properties: {
|
||||
file_path: {
|
||||
description: 'The absolute path to the file to modify. Must start with /. When creating a new file, ensure the parent directory exists (use the `LS` tool to verify).',
|
||||
type: 'string'
|
||||
},
|
||||
old_string: {
|
||||
description: 'The exact text to replace. CRITICAL: Must uniquely identify the single instance to change. Include at least 3-5 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations or does not match exactly, the tool will fail. Use an empty string ("") when creating a new file.',
|
||||
type: 'string'
|
||||
},
|
||||
new_string: {
|
||||
description: 'The text to replace the `old_string` with. When creating a new file (using an empty `old_string`), this should contain the full desired content of the new file. Ensure the resulting code is correct and idiomatic.',
|
||||
type: 'string'
|
||||
}
|
||||
},
|
||||
required: ['file_path', 'old_string', 'new_string'],
|
||||
type: 'object'
|
||||
}
|
||||
);
|
||||
this.rootDirectory = path.resolve(rootDirectory);
|
||||
/**
|
||||
* Creates a new instance of the EditTool
|
||||
* @param rootDirectory Root directory to ground this tool in.
|
||||
*/
|
||||
constructor(rootDirectory: string) {
|
||||
super(
|
||||
'replace',
|
||||
'Edit',
|
||||
`Replaces a SINGLE, UNIQUE occurrence of text within a file. Requires providing significant context around the change to ensure uniqueness. For moving/renaming files, use the Bash tool with \`mv\`. For replacing entire file contents or creating new files use the ${WriteFileTool.Name} tool. Always use the ${ReadFileTool.Name} tool to examine the file before using this tool.`,
|
||||
{
|
||||
properties: {
|
||||
file_path: {
|
||||
description:
|
||||
'The absolute path to the file to modify. Must start with /. When creating a new file, ensure the parent directory exists (use the `LS` tool to verify).',
|
||||
type: 'string',
|
||||
},
|
||||
old_string: {
|
||||
description:
|
||||
'The exact text to replace. CRITICAL: Must uniquely identify the single instance to change. Include at least 3-5 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations or does not match exactly, the tool will fail. Use an empty string ("") when creating a new file.',
|
||||
type: 'string',
|
||||
},
|
||||
new_string: {
|
||||
description:
|
||||
'The text to replace the `old_string` with. When creating a new file (using an empty `old_string`), this should contain the full desired content of the new file. Ensure the resulting code is correct and idiomatic.',
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['file_path', 'old_string', 'new_string'],
|
||||
type: 'object',
|
||||
},
|
||||
);
|
||||
this.rootDirectory = path.resolve(rootDirectory);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a path is within the root directory.
|
||||
* @param pathToCheck The absolute path to check.
|
||||
* @returns True if the path is within the root directory, false otherwise.
|
||||
*/
|
||||
private isWithinRoot(pathToCheck: string): boolean {
|
||||
const normalizedPath = path.normalize(pathToCheck);
|
||||
const normalizedRoot = this.rootDirectory;
|
||||
|
||||
const rootWithSep = normalizedRoot.endsWith(path.sep)
|
||||
? normalizedRoot
|
||||
: normalizedRoot + path.sep;
|
||||
|
||||
return (
|
||||
normalizedPath === normalizedRoot ||
|
||||
normalizedPath.startsWith(rootWithSep)
|
||||
);
|
||||
}
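The containment check above is worth calling out: a path counts as inside the root only if it equals the root or starts with the root plus a separator, which rules out sibling directories that merely share a prefix. A self-contained sketch of the same rule:

```ts
import path from 'node:path';

function isWithinRoot(pathToCheck: string, rootDirectory: string): boolean {
  const normalizedPath = path.normalize(pathToCheck);
  const normalizedRoot = path.resolve(rootDirectory);
  const rootWithSep = normalizedRoot.endsWith(path.sep)
    ? normalizedRoot
    : normalizedRoot + path.sep;
  return (
    normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep)
  );
}

console.log(isWithinRoot('/repo/src/a.ts', '/repo')); // true
console.log(isWithinRoot('/repo', '/repo')); // true
console.log(isWithinRoot('/repository/a.ts', '/repo')); // false, a shared prefix is not containment
```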
|
||||
|
||||
/**
|
||||
* Validates the parameters for the Edit tool
|
||||
* @param params Parameters to validate
|
||||
* @returns True if parameters are valid, false otherwise
|
||||
*/
|
||||
validateParams(params: EditToolParams): boolean {
|
||||
if (
|
||||
this.schema.parameters &&
|
||||
!SchemaValidator.validate(
|
||||
this.schema.parameters as Record<string, unknown>,
|
||||
params,
|
||||
)
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a path is within the root directory.
|
||||
* @param pathToCheck The absolute path to check.
|
||||
* @returns True if the path is within the root directory, false otherwise.
|
||||
*/
|
||||
private isWithinRoot(pathToCheck: string): boolean {
|
||||
const normalizedPath = path.normalize(pathToCheck);
|
||||
const normalizedRoot = this.rootDirectory;
|
||||
|
||||
const rootWithSep = normalizedRoot.endsWith(path.sep)
|
||||
? normalizedRoot
|
||||
: normalizedRoot + path.sep;
|
||||
|
||||
return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
|
||||
// Ensure path is absolute
|
||||
if (!path.isAbsolute(params.file_path)) {
|
||||
console.error(`File path must be absolute: ${params.file_path}`);
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates the parameters for the Edit tool
|
||||
* @param params Parameters to validate
|
||||
* @returns True if parameters are valid, false otherwise
|
||||
*/
|
||||
validateParams(params: EditToolParams): boolean {
|
||||
if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Ensure path is absolute
|
||||
if (!path.isAbsolute(params.file_path)) {
|
||||
console.error(`File path must be absolute: ${params.file_path}`);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Ensure path is within the root directory
|
||||
if (!this.isWithinRoot(params.file_path)) {
|
||||
console.error(`File path must be within the root directory (${this.rootDirectory}): ${params.file_path}`);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
// Validate expected_replacements if provided
|
||||
if (params.expected_replacements !== undefined && params.expected_replacements < 0) {
|
||||
console.error('Expected replacements must be a non-negative number');
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
// Ensure path is within the root directory
|
||||
if (!this.isWithinRoot(params.file_path)) {
|
||||
console.error(
|
||||
`File path must be within the root directory (${this.rootDirectory}): ${params.file_path}`,
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates the potential outcome of an edit operation.
|
||||
* @param params Parameters for the edit operation
|
||||
* @returns An object describing the potential edit outcome
|
||||
* @throws File system errors if reading the file fails unexpectedly (e.g., permissions)
|
||||
*/
|
||||
private calculateEdit(params: EditToolParams): CalculatedEdit {
|
||||
const expectedReplacements = params.expected_replacements === undefined ? 1 : params.expected_replacements;
|
||||
let currentContent: string | null = null;
|
||||
let fileExists = false;
|
||||
let isNewFile = false;
|
||||
let newContent = '';
|
||||
let occurrences = 0;
|
||||
let error: { display: string, raw: string } | undefined = undefined;
|
||||
// Validate expected_replacements if provided
|
||||
if (
|
||||
params.expected_replacements !== undefined &&
|
||||
params.expected_replacements < 0
|
||||
) {
|
||||
console.error('Expected replacements must be a non-negative number');
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
currentContent = fs.readFileSync(params.file_path, 'utf8');
|
||||
fileExists = true;
|
||||
} catch (err: any) {
|
||||
if (err.code !== 'ENOENT') {
|
||||
throw err;
|
||||
}
|
||||
fileExists = false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (params.old_string === '' && !fileExists) {
|
||||
isNewFile = true;
|
||||
newContent = params.new_string;
|
||||
occurrences = 0;
|
||||
} else if (!fileExists) {
|
||||
error = {
|
||||
display: `File not found.`,
|
||||
raw: `File not found: ${params.file_path}`
|
||||
};
|
||||
} else if (currentContent !== null) {
|
||||
occurrences = this.countOccurrences(currentContent, params.old_string);
|
||||
/**
|
||||
* Calculates the potential outcome of an edit operation.
|
||||
* @param params Parameters for the edit operation
|
||||
* @returns An object describing the potential edit outcome
|
||||
* @throws File system errors if reading the file fails unexpectedly (e.g., permissions)
|
||||
*/
|
||||
private calculateEdit(params: EditToolParams): CalculatedEdit {
|
||||
const expectedReplacements =
|
||||
params.expected_replacements === undefined
|
||||
? 1
|
||||
: params.expected_replacements;
|
||||
let currentContent: string | null = null;
|
||||
let fileExists = false;
|
||||
let isNewFile = false;
|
||||
let newContent = '';
|
||||
let occurrences = 0;
|
||||
let error: { display: string; raw: string } | undefined = undefined;
|
||||
|
||||
if (occurrences === 0) {
|
||||
error = {
|
||||
display: `No edits made`,
|
||||
raw: `Failed to edit, 0 occurrences found`
|
||||
}
|
||||
} else if (occurrences !== expectedReplacements) {
|
||||
error = {
|
||||
display: `Failed to edit, expected ${expectedReplacements} occurrences but found ${occurrences}`,
|
||||
raw: `Failed to edit, Expected ${expectedReplacements} occurrences but found ${occurrences} in file: ${params.file_path}`
|
||||
}
|
||||
} else {
|
||||
newContent = this.replaceAll(currentContent, params.old_string, params.new_string);
|
||||
}
|
||||
} else {
|
||||
error = {
|
||||
display: `Failed to read content`,
|
||||
raw: `Failed to read content of existing file: ${params.file_path}`
|
||||
}
|
||||
}
|
||||
try {
|
||||
currentContent = fs.readFileSync(params.file_path, 'utf8');
|
||||
fileExists = true;
|
||||
} catch (err: any) {
|
||||
if (err.code !== 'ENOENT') {
|
||||
throw err;
|
||||
}
|
||||
fileExists = false;
|
||||
}
|
||||
|
||||
return {
|
||||
currentContent,
|
||||
newContent,
|
||||
occurrences,
|
||||
error,
|
||||
isNewFile
|
||||
if (params.old_string === '' && !fileExists) {
|
||||
isNewFile = true;
|
||||
newContent = params.new_string;
|
||||
occurrences = 0;
|
||||
} else if (!fileExists) {
|
||||
error = {
|
||||
display: `File not found.`,
|
||||
raw: `File not found: ${params.file_path}`,
|
||||
};
|
||||
} else if (currentContent !== null) {
|
||||
occurrences = this.countOccurrences(currentContent, params.old_string);
|
||||
|
||||
if (occurrences === 0) {
|
||||
error = {
|
||||
display: `No edits made`,
|
||||
raw: `Failed to edit, 0 occurrences found`,
|
||||
};
|
||||
} else if (occurrences !== expectedReplacements) {
|
||||
error = {
|
||||
display: `Failed to edit, expected ${expectedReplacements} occurrences but found ${occurrences}`,
|
||||
raw: `Failed to edit, Expected ${expectedReplacements} occurrences but found ${occurrences} in file: ${params.file_path}`,
|
||||
};
|
||||
} else {
|
||||
newContent = this.replaceAll(
|
||||
currentContent,
|
||||
params.old_string,
|
||||
params.new_string,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
error = {
|
||||
display: `Failed to read content`,
|
||||
raw: `Failed to read content of existing file: ${params.file_path}`,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if confirmation is needed and prepares the confirmation details.
|
||||
* This method performs the calculation needed to generate the diff and respects the `shouldAlwaysEdit` state.
|
||||
* @param params Parameters for the potential edit operation
|
||||
* @returns Confirmation details object or false if no confirmation is needed/possible.
|
||||
*/
|
||||
async shouldConfirmExecute(params: EditToolParams): Promise<ToolCallConfirmationDetails | false> {
|
||||
if (this.shouldAlwaysEdit) {
|
||||
return false;
|
||||
}
|
||||
return {
|
||||
currentContent,
|
||||
newContent,
|
||||
occurrences,
|
||||
error,
|
||||
isNewFile,
|
||||
};
|
||||
}
|
||||
|
||||
if (!this.validateParams(params)) {
|
||||
console.error("[EditTool] Attempted confirmation with invalid parameters.");
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Determines if confirmation is needed and prepares the confirmation details.
|
||||
* This method performs the calculation needed to generate the diff and respects the `shouldAlwaysEdit` state.
|
||||
* @param params Parameters for the potential edit operation
|
||||
* @returns Confirmation details object or false if no confirmation is needed/possible.
|
||||
*/
|
||||
async shouldConfirmExecute(
|
||||
params: EditToolParams,
|
||||
): Promise<ToolCallConfirmationDetails | false> {
|
||||
if (this.shouldAlwaysEdit) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let calculatedEdit: CalculatedEdit;
|
||||
try {
|
||||
calculatedEdit = this.calculateEdit(params);
|
||||
} catch (error) {
|
||||
console.error(`Error calculating edit for confirmation: ${error instanceof Error ? error.message : String(error)}`);
|
||||
return false;
|
||||
}
|
||||
if (!this.validateParams(params)) {
|
||||
console.error(
|
||||
'[EditTool] Attempted confirmation with invalid parameters.',
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (calculatedEdit.error) {
|
||||
return false;
|
||||
}
|
||||
let calculatedEdit: CalculatedEdit;
|
||||
try {
|
||||
calculatedEdit = this.calculateEdit(params);
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Error calculating edit for confirmation: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (calculatedEdit.error) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const fileName = path.basename(params.file_path);
|
||||
const fileDiff = Diff.createPatch(
|
||||
fileName,
|
||||
calculatedEdit.currentContent ?? '',
|
||||
calculatedEdit.newContent,
|
||||
'Current',
|
||||
'Proposed',
|
||||
{ context: 3, ignoreWhitespace: true },
|
||||
);
|
||||
|
||||
const confirmationDetails: ToolEditConfirmationDetails = {
|
||||
title: `Confirm Edit: ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
|
||||
fileName,
|
||||
fileDiff,
|
||||
onConfirm: async (outcome: ToolConfirmationOutcome) => {
|
||||
if (outcome === ToolConfirmationOutcome.ProceedAlways) {
|
||||
this.shouldAlwaysEdit = true;
|
||||
}
|
||||
},
|
||||
};
|
||||
return confirmationDetails;
|
||||
}
|
||||
|
||||
getDescription(params: EditToolParams): string {
|
||||
const relativePath = makeRelative(params.file_path, this.rootDirectory);
|
||||
const oldStringSnippet =
|
||||
params.old_string.split('\n')[0].substring(0, 30) +
|
||||
(params.old_string.length > 30 ? '...' : '');
|
||||
const newStringSnippet =
|
||||
params.new_string.split('\n')[0].substring(0, 30) +
|
||||
(params.new_string.length > 30 ? '...' : '');
|
||||
return `${shortenPath(relativePath)}: ${oldStringSnippet} => ${newStringSnippet}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the edit operation with the given parameters.
|
||||
* This method recalculates the edit operation before execution.
|
||||
* @param params Parameters for the edit operation
|
||||
* @returns Result of the edit operation
|
||||
*/
|
||||
async execute(params: EditToolParams): Promise<EditToolResult> {
|
||||
if (!this.validateParams(params)) {
|
||||
return {
|
||||
llmContent: 'Invalid parameters for file edit operation',
|
||||
returnDisplay: '**Error:** Invalid parameters for file edit operation',
|
||||
};
|
||||
}
|
||||
|
||||
let editData: CalculatedEdit;
|
||||
try {
|
||||
editData = this.calculateEdit(params);
|
||||
} catch (error) {
|
||||
return {
|
||||
llmContent: `Error preparing edit: ${error instanceof Error ? error.message : String(error)}`,
|
||||
returnDisplay: 'Failed to prepare edit',
|
||||
};
|
||||
}
|
||||
|
||||
if (editData.error) {
|
||||
return {
|
||||
llmContent: editData.error.raw,
|
||||
returnDisplay: editData.error.display,
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
this.ensureParentDirectoriesExist(params.file_path);
|
||||
fs.writeFileSync(params.file_path, editData.newContent, 'utf8');
|
||||
|
||||
if (editData.isNewFile) {
|
||||
return {
|
||||
llmContent: `Created new file: ${params.file_path} with provided content.`,
|
||||
returnDisplay: `Created ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
|
||||
};
|
||||
} else {
|
||||
const fileName = path.basename(params.file_path);
|
||||
const fileDiff = Diff.createPatch(
|
||||
fileName,
|
||||
calculatedEdit.currentContent ?? '',
|
||||
calculatedEdit.newContent,
|
||||
'Current',
|
||||
'Proposed',
|
||||
{ context: 3, ignoreWhitespace: true, }
|
||||
fileName,
|
||||
editData.currentContent ?? '',
|
||||
editData.newContent,
|
||||
'Current',
|
||||
'Proposed',
|
||||
{ context: 3, ignoreWhitespace: true },
|
||||
);
|
||||
|
||||
const confirmationDetails: ToolEditConfirmationDetails = {
|
||||
title: `Confirm Edit: ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
|
||||
fileName,
|
||||
fileDiff,
|
||||
onConfirm: async (outcome: ToolConfirmationOutcome) => {
|
||||
if (outcome === ToolConfirmationOutcome.ProceedAlways) {
|
||||
this.shouldAlwaysEdit = true;
|
||||
}
|
||||
},
|
||||
return {
|
||||
llmContent: `Successfully modified file: ${params.file_path} (${editData.occurrences} replacements).`,
|
||||
returnDisplay: { fileDiff },
|
||||
};
|
||||
return confirmationDetails;
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
llmContent: `Error executing edit: ${error instanceof Error ? error.message : String(error)}`,
|
||||
returnDisplay: `Failed to edit file`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
getDescription(params: EditToolParams): string {
|
||||
const relativePath = makeRelative(params.file_path, this.rootDirectory);
|
||||
const oldStringSnippet = params.old_string.split('\n')[0].substring(0, 30) + (params.old_string.length > 30 ? '...' : '');
|
||||
const newStringSnippet = params.new_string.split('\n')[0].substring(0, 30) + (params.new_string.length > 30 ? '...' : '');
|
||||
return `${shortenPath(relativePath)}: ${oldStringSnippet} => ${newStringSnippet}`;
|
||||
/**
|
||||
* Counts occurrences of a substring in a string
|
||||
* @param str String to search in
|
||||
* @param substr Substring to count
|
||||
* @returns Number of occurrences
|
||||
*/
|
||||
private countOccurrences(str: string, substr: string): number {
|
||||
if (substr === '') {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the edit operation with the given parameters.
|
||||
* This method recalculates the edit operation before execution.
|
||||
* @param params Parameters for the edit operation
|
||||
* @returns Result of the edit operation
|
||||
*/
|
||||
async execute(params: EditToolParams): Promise<EditToolResult> {
|
||||
if (!this.validateParams(params)) {
|
||||
return {
|
||||
llmContent: 'Invalid parameters for file edit operation',
|
||||
returnDisplay: '**Error:** Invalid parameters for file edit operation'
|
||||
};
|
||||
}
|
||||
|
||||
let editData: CalculatedEdit;
|
||||
try {
|
||||
editData = this.calculateEdit(params);
|
||||
} catch (error) {
|
||||
return {
|
||||
llmContent: `Error preparing edit: ${error instanceof Error ? error.message : String(error)}`,
|
||||
returnDisplay: 'Failed to prepare edit'
|
||||
};
|
||||
}
|
||||
|
||||
if (editData.error) {
|
||||
return {
|
||||
llmContent: editData.error.raw,
|
||||
returnDisplay: editData.error.display
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
this.ensureParentDirectoriesExist(params.file_path);
|
||||
fs.writeFileSync(params.file_path, editData.newContent, 'utf8');
|
||||
|
||||
if (editData.isNewFile) {
|
||||
return {
|
||||
llmContent: `Created new file: ${params.file_path} with provided content.`,
|
||||
returnDisplay: `Created ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`
|
||||
};
|
||||
} else {
|
||||
const fileName = path.basename(params.file_path);
|
||||
const fileDiff = Diff.createPatch(
|
||||
fileName,
|
||||
editData.currentContent ?? '',
|
||||
editData.newContent,
|
||||
'Current',
|
||||
'Proposed',
|
||||
{ context: 3, ignoreWhitespace: true }
|
||||
);
|
||||
|
||||
return {
|
||||
llmContent: `Successfully modified file: ${params.file_path} (${editData.occurrences} replacements).`,
|
||||
returnDisplay: { fileDiff }
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
llmContent: `Error executing edit: ${error instanceof Error ? error.message : String(error)}`,
|
||||
returnDisplay: `Failed to edit file`
|
||||
};
|
||||
}
|
||||
let count = 0;
|
||||
let pos = str.indexOf(substr);
|
||||
while (pos !== -1) {
|
||||
count++;
|
||||
pos = str.indexOf(substr, pos + substr.length);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
/**
|
||||
* Counts occurrences of a substring in a string
|
||||
* @param str String to search in
|
||||
* @param substr Substring to count
|
||||
* @returns Number of occurrences
|
||||
*/
|
||||
private countOccurrences(str: string, substr: string): number {
|
||||
if (substr === '') {
|
||||
return 0;
|
||||
}
|
||||
let count = 0;
|
||||
let pos = str.indexOf(substr);
|
||||
while (pos !== -1) {
|
||||
count++;
|
||||
pos = str.indexOf(substr, pos + substr.length);
|
||||
}
|
||||
return count;
|
||||
/**
|
||||
* Replaces all occurrences of a substring in a string
|
||||
* @param str String to modify
|
||||
* @param find Substring to find
|
||||
* @param replace Replacement string
|
||||
* @returns Modified string
|
||||
*/
|
||||
private replaceAll(str: string, find: string, replace: string): string {
|
||||
if (find === '') {
|
||||
return str;
|
||||
}
|
||||
return str.split(find).join(replace);
|
||||
}
|
||||
|
||||
/**
|
||||
* Replaces all occurrences of a substring in a string
|
||||
* @param str String to modify
|
||||
* @param find Substring to find
|
||||
* @param replace Replacement string
|
||||
* @returns Modified string
|
||||
*/
|
||||
private replaceAll(str: string, find: string, replace: string): string {
|
||||
if (find === '') {
|
||||
return str;
|
||||
}
|
||||
return str.split(find).join(replace);
|
||||
}
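The two string helpers above are small enough to demonstrate directly: counting is non-overlapping, and replacement uses split/join. A standalone sketch:

```ts
function countOccurrences(str: string, substr: string): number {
  if (substr === '') return 0;
  let count = 0;
  let pos = str.indexOf(substr);
  while (pos !== -1) {
    count++;
    // Advance past the full match, so matches never overlap.
    pos = str.indexOf(substr, pos + substr.length);
  }
  return count;
}

function replaceAll(str: string, find: string, replace: string): string {
  if (find === '') return str;
  return str.split(find).join(replace);
}

console.log(countOccurrences('aaaa', 'aa')); // 2, matches do not overlap
console.log(replaceAll('a.b.c', '.', '/')); // 'a/b/c'
```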
|
||||
|
||||
/**
|
||||
* Creates parent directories if they don't exist
|
||||
* @param filePath Path to ensure parent directories exist
|
||||
*/
|
||||
private ensureParentDirectoriesExist(filePath: string): void {
|
||||
const dirName = path.dirname(filePath);
|
||||
if (!fs.existsSync(dirName)) {
|
||||
fs.mkdirSync(dirName, { recursive: true });
|
||||
}
|
||||
/**
|
||||
* Creates parent directories if they don't exist
|
||||
* @param filePath Path to ensure parent directories exist
|
||||
*/
|
||||
private ensureParentDirectoriesExist(filePath: string): void {
|
||||
const dirName = path.dirname(filePath);
|
||||
if (!fs.existsSync(dirName)) {
|
||||
fs.mkdirSync(dirName, { recursive: true });
|
||||
}
|
||||
}
|
||||
}
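A hypothetical call site for the `EditTool` above, using the constructor, `validateParams`, and `execute` signatures visible in this diff; the import path and the concrete parameter values are assumptions:

```ts
import { EditTool } from './tools/edit.tool.js';

async function renameConstantOnce(rootDir: string): Promise<void> {
  const editTool = new EditTool(rootDir);
  const params = {
    file_path: `${rootDir}/src/index.ts`,
    old_string: 'const oldName =',
    new_string: 'const newName =',
    expected_replacements: 1,
  };

  if (!editTool.validateParams(params)) {
    throw new Error('EditTool rejected the parameters');
  }

  const result = await editTool.execute(params);
  console.log(result.llmContent); // e.g. 'Successfully modified file: ... (1 replacements).'
}
```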
|
||||
|
|
|
@@ -23,8 +23,7 @@ export interface GlobToolParams {
|
|||
/**
|
||||
* Result from the GlobTool
|
||||
*/
|
||||
export interface GlobToolResult extends ToolResult {
|
||||
}
|
||||
export interface GlobToolResult extends ToolResult {}
|
||||
|
||||
/**
|
||||
* Implementation of the GlobTool that finds files matching patterns,
|
||||
|
@@ -49,17 +48,19 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
{
|
||||
properties: {
|
||||
pattern: {
|
||||
description: 'The glob pattern to match against (e.g., \'*.py\', \'src/**/*.js\', \'docs/*.md\').',
|
||||
type: 'string'
|
||||
description:
|
||||
"The glob pattern to match against (e.g., '*.py', 'src/**/*.js', 'docs/*.md').",
|
||||
type: 'string',
|
||||
},
|
||||
path: {
|
||||
description: 'Optional: The absolute path to the directory to search within. If omitted, searches the root directory.',
|
||||
type: 'string'
|
||||
}
|
||||
description:
|
||||
'Optional: The absolute path to the directory to search within. If omitted, searches the root directory.',
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['pattern'],
|
||||
type: 'object'
|
||||
}
|
||||
type: 'object',
|
||||
},
|
||||
);
|
||||
|
||||
// Set the root directory
|
||||
|
@ -84,7 +85,10 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
|
||||
// Check if it's the root itself or starts with the root path followed by a separator.
|
||||
// This ensures that we don't accidentally allow access to parent directories.
|
||||
return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
|
||||
return (
|
||||
normalizedPath === normalizedRoot ||
|
||||
normalizedPath.startsWith(rootWithSep)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -94,7 +98,13 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
* @returns An error message string if invalid, null otherwise
|
||||
*/
|
||||
invalidParams(params: GlobToolParams): string | null {
|
||||
if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
|
||||
if (
|
||||
this.schema.parameters &&
|
||||
!SchemaValidator.validate(
|
||||
this.schema.parameters as Record<string, unknown>,
|
||||
params,
|
||||
)
|
||||
) {
|
||||
return "Parameters failed schema validation. Ensure 'pattern' is a string and 'path' (if provided) is a string.";
|
||||
}
|
||||
|
||||
|
@ -121,8 +131,12 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
}
|
||||
|
||||
// Validate glob pattern (basic non-empty check)
|
||||
if (!params.pattern || typeof params.pattern !== 'string' || params.pattern.trim() === '') {
|
||||
return "The 'pattern' parameter cannot be empty.";
|
||||
if (
|
||||
!params.pattern ||
|
||||
typeof params.pattern !== 'string' ||
|
||||
params.pattern.trim() === ''
|
||||
) {
|
||||
return "The 'pattern' parameter cannot be empty.";
|
||||
}
|
||||
// Could add more sophisticated glob pattern validation if needed
|
||||
|
||||
|
@ -156,7 +170,7 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
if (validationError) {
|
||||
return {
|
||||
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
|
||||
returnDisplay: `**Error:** Failed to execute tool.`
|
||||
returnDisplay: `**Error:** Failed to execute tool.`,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -168,10 +182,10 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
// We use fast-glob because it's performant and supports glob patterns.
|
||||
const entries = await fg(params.pattern, {
|
||||
cwd: searchDirAbsolute, // Search within this absolute directory
|
||||
absolute: true, // Return absolute paths
|
||||
onlyFiles: true, // Match only files
|
||||
stats: true, // Include file stats object for sorting
|
||||
dot: true, // Include files starting with a dot
|
||||
absolute: true, // Return absolute paths
|
||||
onlyFiles: true, // Match only files
|
||||
stats: true, // Include file stats object for sorting
|
||||
dot: true, // Include files starting with a dot
|
||||
ignore: ['**/node_modules/**', '**/.git/**'], // Common sensible default, adjust as needed
|
||||
followSymbolicLinks: false, // Avoid potential issues with symlinks unless specifically needed
|
||||
suppressErrors: true, // Suppress EACCES errors for individual files (we handle dir access in validation)
|
||||
|
@ -181,7 +195,7 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
if (!entries || entries.length === 0) {
|
||||
return {
|
||||
llmContent: `No files found matching pattern "${params.pattern}" within ${searchDirAbsolute}.`,
|
||||
returnDisplay: `No files found`
|
||||
returnDisplay: `No files found`,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -197,30 +211,39 @@ export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
|
|||
});
|
||||
|
||||
// 5. Format Output
|
||||
const sortedAbsolutePaths = entries.map(entry => entry.path);
|
||||
const sortedAbsolutePaths = entries.map((entry) => entry.path);
|
||||
|
||||
// Convert absolute paths to relative paths (to rootDir) for clearer display
|
||||
const sortedRelativePaths = sortedAbsolutePaths.map(absPath => makeRelative(absPath, this.rootDirectory));
|
||||
const sortedRelativePaths = sortedAbsolutePaths.map((absPath) =>
|
||||
makeRelative(absPath, this.rootDirectory),
|
||||
);
|
||||
|
||||
// Construct the result message
|
||||
const fileListDescription = sortedRelativePaths.map(p => ` - ${shortenPath(p)}`).join('\n');
|
||||
const fileListDescription = sortedRelativePaths
|
||||
.map((p) => ` - ${shortenPath(p)}`)
|
||||
.join('\n');
|
||||
const fileCount = sortedRelativePaths.length;
|
||||
const relativeSearchDir = makeRelative(searchDirAbsolute, this.rootDirectory);
|
||||
const displayPath = shortenPath(relativeSearchDir === '.' ? 'root directory' : relativeSearchDir);
|
||||
const relativeSearchDir = makeRelative(
|
||||
searchDirAbsolute,
|
||||
this.rootDirectory,
|
||||
);
|
||||
const displayPath = shortenPath(
|
||||
relativeSearchDir === '.' ? 'root directory' : relativeSearchDir,
|
||||
);
|
||||
|
||||
return {
|
||||
llmContent: `Found ${fileCount} file(s) matching "${params.pattern}" within ${displayPath}, sorted by modification time (newest first):\n${fileListDescription}`,
|
||||
returnDisplay: `Found ${fileCount} matching file(s)`
|
||||
returnDisplay: `Found ${fileCount} matching file(s)`,
|
||||
};
|
||||
|
||||
} catch (error) {
|
||||
// Catch unexpected errors during glob execution (less likely with suppressErrors=true, but possible)
|
||||
const errorMessage = error instanceof Error ? error.message : String(error);
|
||||
console.error(`GlobTool execute Error: ${errorMessage}`, error);
|
||||
return {
|
||||
llmContent: `Error during glob search operation: ${errorMessage}`,
|
||||
returnDisplay: `**Error:** An unexpected error occurred.`
|
||||
};
|
||||
// Catch unexpected errors during glob execution (less likely with suppressErrors=true, but possible)
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error);
|
||||
console.error(`GlobTool execute Error: ${errorMessage}`, error);
|
||||
return {
|
||||
llmContent: `Error during glob search operation: ${errorMessage}`,
|
||||
returnDisplay: `**Error:** An unexpected error occurred.`,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
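The `fast-glob` call above can be exercised on its own with the same option set; sorting by modification time (newest first) mirrors the tool's output order. A standalone sketch:

```ts
import fg from 'fast-glob';

async function findFilesNewestFirst(
  pattern: string,
  searchDir: string,
): Promise<string[]> {
  const entries = await fg(pattern, {
    cwd: searchDir,
    absolute: true, // return absolute paths
    onlyFiles: true,
    stats: true, // populate entry.stats so we can sort by mtime
    dot: true,
    ignore: ['**/node_modules/**', '**/.git/**'],
    followSymbolicLinks: false,
    suppressErrors: true,
  });
  entries.sort((a, b) => (b.stats?.mtimeMs ?? 0) - (a.stats?.mtimeMs ?? 0));
  return entries.map((entry) => entry.path);
}

findFilesNewestFirst('**/*.ts', process.cwd()).then((paths) => {
  console.log(paths.slice(0, 5));
});
```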
|
||||
|
|
|
@@ -42,8 +42,7 @@ interface GrepMatch {
|
|||
/**
|
||||
* Result from the GrepTool
|
||||
*/
|
||||
export interface GrepToolResult extends ToolResult {
|
||||
}
|
||||
export interface GrepToolResult extends ToolResult {}
|
||||
|
||||
// --- GrepTool Class ---
|
||||
|
||||
|
@@ -65,21 +64,24 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
{
|
||||
properties: {
|
||||
pattern: {
|
||||
description: 'The regular expression (regex) pattern to search for within file contents (e.g., \'function\\s+myFunction\', \'import\\s+\\{.*\\}\\s+from\\s+.*\').',
|
||||
type: 'string'
|
||||
description:
|
||||
"The regular expression (regex) pattern to search for within file contents (e.g., 'function\\s+myFunction', 'import\\s+\\{.*\\}\\s+from\\s+.*').",
|
||||
type: 'string',
|
||||
},
|
||||
path: {
|
||||
description: 'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.',
|
||||
type: 'string'
|
||||
description:
|
||||
'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.',
|
||||
type: 'string',
|
||||
},
|
||||
include: {
|
||||
description: 'Optional: A glob pattern to filter which files are searched (e.g., \'*.js\', \'*.{ts,tsx}\', \'src/**\'). If omitted, searches all files (respecting potential global ignores).',
|
||||
type: 'string'
|
||||
}
|
||||
description:
|
||||
"Optional: A glob pattern to filter which files are searched (e.g., '*.js', '*.{ts,tsx}', 'src/**'). If omitted, searches all files (respecting potential global ignores).",
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['pattern'],
|
||||
type: 'object'
|
||||
}
|
||||
type: 'object',
|
||||
},
|
||||
);
|
||||
// Ensure rootDirectory is absolute and normalized
|
||||
this.rootDirectory = path.resolve(rootDirectory);
|
||||
|
@ -97,8 +99,13 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
const targetPath = path.resolve(this.rootDirectory, relativePath || '.');
|
||||
|
||||
// Security Check: Ensure the resolved path is still within the root directory.
|
||||
if (!targetPath.startsWith(this.rootDirectory) && targetPath !== this.rootDirectory) {
|
||||
throw new Error(`Path validation failed: Attempted path "${relativePath || '.'}" resolves outside the allowed root directory "${this.rootDirectory}".`);
|
||||
if (
|
||||
!targetPath.startsWith(this.rootDirectory) &&
|
||||
targetPath !== this.rootDirectory
|
||||
) {
|
||||
throw new Error(
|
||||
`Path validation failed: Attempted path "${relativePath || '.'}" resolves outside the allowed root directory "${this.rootDirectory}".`,
|
||||
);
|
||||
}
|
||||
|
||||
// Check existence and type after resolving
|
||||
|
@ -111,7 +118,9 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
if (err.code === 'ENOENT') {
|
||||
throw new Error(`Path does not exist: ${targetPath}`);
|
||||
}
|
||||
throw new Error(`Failed to access path stats for ${targetPath}: ${err.message}`);
|
||||
throw new Error(
|
||||
`Failed to access path stats for ${targetPath}: ${err.message}`,
|
||||
);
|
||||
}
|
||||
|
||||
return targetPath;
|
||||
|
@ -123,8 +132,14 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
* @returns An error message string if invalid, null otherwise
|
||||
*/
|
||||
invalidParams(params: GrepToolParams): string | null {
|
||||
if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
|
||||
return "Parameters failed schema validation.";
|
||||
if (
|
||||
this.schema.parameters &&
|
||||
!SchemaValidator.validate(
|
||||
this.schema.parameters as Record<string, unknown>,
|
||||
params,
|
||||
)
|
||||
) {
|
||||
return 'Parameters failed schema validation.';
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -142,7 +157,6 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
return null; // Parameters are valid
|
||||
}
|
||||
|
||||
|
||||
// --- Core Execution ---
|
||||
|
||||
/**
|
||||
|
@ -156,7 +170,7 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
console.error(`GrepTool Parameter Validation Failed: ${validationError}`);
|
||||
return {
|
||||
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
|
||||
returnDisplay: `**Error:** Failed to execute tool.`
|
||||
returnDisplay: `**Error:** Failed to execute tool.`,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -177,40 +191,49 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
return { llmContent: noMatchMsg, returnDisplay: noMatchUser };
|
||||
}
|
||||
|
||||
const matchesByFile = matches.reduce((acc, match) => {
|
||||
const relativeFilePath = path.relative(searchDirAbs, path.resolve(searchDirAbs, match.filePath)) || path.basename(match.filePath);
|
||||
if (!acc[relativeFilePath]) {
|
||||
acc[relativeFilePath] = [];
|
||||
}
|
||||
acc[relativeFilePath].push(match);
|
||||
acc[relativeFilePath].sort((a, b) => a.lineNumber - b.lineNumber);
|
||||
return acc;
|
||||
}, {} as Record<string, GrepMatch[]>);
|
||||
const matchesByFile = matches.reduce(
|
||||
(acc, match) => {
|
||||
const relativeFilePath =
|
||||
path.relative(
|
||||
searchDirAbs,
|
||||
path.resolve(searchDirAbs, match.filePath),
|
||||
) || path.basename(match.filePath);
|
||||
if (!acc[relativeFilePath]) {
|
||||
acc[relativeFilePath] = [];
|
||||
}
|
||||
acc[relativeFilePath].push(match);
|
||||
acc[relativeFilePath].sort((a, b) => a.lineNumber - b.lineNumber);
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, GrepMatch[]>,
|
||||
);
|
||||
|
||||
let llmContent = `Found ${matches.length} match(es) for pattern "${params.pattern}" in path "${searchDirDisplay}"${params.include ? ` (filter: "${params.include}")` : ''}:\n---\n`;
|
||||
|
||||
for (const filePath in matchesByFile) {
|
||||
llmContent += `File: ${filePath}\n`;
|
||||
matchesByFile[filePath].forEach(match => {
|
||||
matchesByFile[filePath].forEach((match) => {
|
||||
const trimmedLine = match.line.trim();
|
||||
llmContent += `L${match.lineNumber}: ${trimmedLine}\n`;
|
||||
});
|
||||
llmContent += '---\n';
|
||||
}
|
||||
|
||||
return { llmContent: llmContent.trim(), returnDisplay: `Found ${matches.length} matche(s)` };
|
||||
|
||||
return {
|
||||
llmContent: llmContent.trim(),
|
||||
returnDisplay: `Found ${matches.length} matche(s)`,
|
||||
};
|
||||
} catch (error) {
|
||||
console.error(`Error during GrepTool execution: ${error}`);
|
||||
const errorMessage = error instanceof Error ? error.message : String(error);
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error);
|
||||
return {
|
||||
llmContent: `Error during grep search operation: ${errorMessage}`,
|
||||
returnDisplay: errorMessage
|
||||
returnDisplay: errorMessage,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// --- Inlined Grep Logic and Helpers ---
|
||||
|
||||
/**
|
||||
|
@ -221,9 +244,13 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
private isCommandAvailable(command: string): Promise<boolean> {
|
||||
return new Promise((resolve) => {
|
||||
const checkCommand = process.platform === 'win32' ? 'where' : 'command';
|
||||
const checkArgs = process.platform === 'win32' ? [command] : ['-v', command];
|
||||
const checkArgs =
|
||||
process.platform === 'win32' ? [command] : ['-v', command];
|
||||
try {
|
||||
const child = spawn(checkCommand, checkArgs, { stdio: 'ignore', shell: process.platform === 'win32' });
|
||||
const child = spawn(checkCommand, checkArgs, {
|
||||
stdio: 'ignore',
|
||||
shell: process.platform === 'win32',
|
||||
});
|
||||
child.on('close', (code) => resolve(code === 0));
|
||||
child.on('error', () => resolve(false));
|
||||
} catch (e) {
|
||||
|
@ -252,7 +279,9 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
return false;
|
||||
} catch (err: any) {
|
||||
if (err.code !== 'ENOENT') {
|
||||
console.error(`Error checking for .git in ${currentPath}: ${err.message}`);
|
||||
console.error(
|
||||
`Error checking for .git in ${currentPath}: ${err.message}`,
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -263,19 +292,21 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
currentPath = path.dirname(currentPath);
|
||||
}
|
||||
} catch (err: any) {
|
||||
console.error(`Error traversing directory structure upwards from ${dirPath}: ${err instanceof Error ? err.message : String(err)}`);
|
||||
console.error(
|
||||
`Error traversing directory structure upwards from ${dirPath}: ${err instanceof Error ? err.message : String(err)}`,
|
||||
);
|
||||
}
|
||||
return false;
|
||||
}
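The parser described below splits each output line on its first two colons only, so colons inside the matched line content are preserved. A standalone sketch of that splitting rule:

```ts
function parseGrepLine(
  line: string,
): { filePath: string; lineNumber: number; line: string } | null {
  const firstColonIndex = line.indexOf(':');
  if (firstColonIndex === -1) return null;
  const secondColonIndex = line.indexOf(':', firstColonIndex + 1);
  if (secondColonIndex === -1) return null;

  const filePath = line.substring(0, firstColonIndex);
  const lineNumber = parseInt(
    line.substring(firstColonIndex + 1, secondColonIndex),
    10,
  );
  const content = line.substring(secondColonIndex + 1);
  return Number.isNaN(lineNumber)
    ? null
    : { filePath, lineNumber, line: content };
}

console.log(parseGrepLine('src/app.ts:42:const url = "http://example.com";'));
// { filePath: 'src/app.ts', lineNumber: 42, line: 'const url = "http://example.com";' }
```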
|
||||
|
||||
/**
|
||||
* Parses the standard output of grep-like commands (git grep, system grep).
|
||||
* Expects format: filePath:lineNumber:lineContent
|
||||
* Handles colons within file paths and line content correctly.
|
||||
* @param {string} output The raw stdout string.
|
||||
* @param {string} basePath The absolute directory the search was run from, for relative paths.
|
||||
* @returns {GrepMatch[]} Array of match objects.
|
||||
*/
|
||||
* Parses the standard output of grep-like commands (git grep, system grep).
|
||||
* Expects format: filePath:lineNumber:lineContent
|
||||
* Handles colons within file paths and line content correctly.
|
||||
* @param {string} output The raw stdout string.
|
||||
* @param {string} basePath The absolute directory the search was run from, for relative paths.
|
||||
* @returns {GrepMatch[]} Array of match objects.
|
||||
*/
|
||||
private parseGrepOutput(output: string, basePath: string): GrepMatch[] {
|
||||
const results: GrepMatch[] = [];
|
||||
if (!output) return results;
|
||||
|
@ -302,7 +333,10 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
|
||||
// Extract parts based on the found colon indices
|
||||
const filePathRaw = line.substring(0, firstColonIndex);
|
||||
const lineNumberStr = line.substring(firstColonIndex + 1, secondColonIndex);
|
||||
const lineNumberStr = line.substring(
|
||||
firstColonIndex + 1,
|
||||
secondColonIndex,
|
||||
);
|
||||
// The rest of the line, starting after the second colon, is the content.
|
||||
const lineContent = line.substring(secondColonIndex + 1);
|
||||
|
||||
|
@ -327,10 +361,10 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
|
|||
}
|
||||
|
||||
/**
|
||||
* Gets a description of the grep operation
|
||||
* @param params Parameters for the grep operation
|
||||
* @returns A string describing the grep
|
||||
*/
|
||||
* Gets a description of the grep operation
|
||||
* @param params Parameters for the grep operation
* @returns A string describing the grep
*/
getDescription(params: GrepToolParams): string {
let description = `'${params.pattern}'`;

@@ -363,37 +397,59 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
try {
// --- Strategy 1: git grep ---
const isGit = await this.isGitRepository(absolutePath);
const gitAvailable = isGit && await this.isCommandAvailable('git');
const gitAvailable = isGit && (await this.isCommandAvailable('git'));

if (gitAvailable) {
strategyUsed = 'git grep';
const gitArgs = ['grep', '--untracked', '-n', '-E', '--ignore-case', pattern];
const gitArgs = [
'grep',
'--untracked',
'-n',
'-E',
'--ignore-case',
pattern,
];
if (include) {
gitArgs.push('--', include);
}

try {
const output = await new Promise<string>((resolve, reject) => {
const child = spawn('git', gitArgs, { cwd: absolutePath, windowsHide: true });
const child = spawn('git', gitArgs, {
cwd: absolutePath,
windowsHide: true,
});
const stdoutChunks: Buffer[] = [];
const stderrChunks: Buffer[] = [];

child.stdout.on('data', (chunk) => { stdoutChunks.push(chunk); });
child.stderr.on('data', (chunk) => { stderrChunks.push(chunk); });
child.stdout.on('data', (chunk) => {
stdoutChunks.push(chunk);
});
child.stderr.on('data', (chunk) => {
stderrChunks.push(chunk);
});

child.on('error', (err) => reject(new Error(`Failed to start git grep: ${err.message}`)));
child.on('error', (err) =>
reject(new Error(`Failed to start git grep: ${err.message}`)),
);

child.on('close', (code) => {
const stdoutData = Buffer.concat(stdoutChunks).toString('utf8');
const stderrData = Buffer.concat(stderrChunks).toString('utf8');
if (code === 0) resolve(stdoutData);
else if (code === 1) resolve(''); // No matches is not an error
else reject(new Error(`git grep exited with code ${code}: ${stderrData}`));
else if (code === 1)
resolve(''); // No matches is not an error
else
reject(
new Error(`git grep exited with code ${code}: ${stderrData}`),
);
});
});
return this.parseGrepOutput(output, absolutePath);
} catch (gitError: any) {
console.error(`GrepTool: git grep strategy failed: ${gitError.message}. Falling back...`);
console.error(
`GrepTool: git grep strategy failed: ${gitError.message}. Falling back...`,
);
}
}


@@ -403,7 +459,7 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
strategyUsed = 'system grep';
const grepArgs = ['-r', '-n', '-H', '-E'];
const commonExcludes = ['.git', 'node_modules', 'bower_components'];
commonExcludes.forEach(dir => grepArgs.push(`--exclude-dir=${dir}`));
commonExcludes.forEach((dir) => grepArgs.push(`--exclude-dir=${dir}`));
if (include) {
grepArgs.push(`--include=${include}`);
}

@@ -412,41 +468,67 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {

try {
const output = await new Promise<string>((resolve, reject) => {
const child = spawn('grep', grepArgs, { cwd: absolutePath, windowsHide: true });
const child = spawn('grep', grepArgs, {
cwd: absolutePath,
windowsHide: true,
});
const stdoutChunks: Buffer[] = [];
const stderrChunks: Buffer[] = [];

child.stdout.on('data', (chunk) => { stdoutChunks.push(chunk); });
child.stdout.on('data', (chunk) => {
stdoutChunks.push(chunk);
});
child.stderr.on('data', (chunk) => {
const stderrStr = chunk.toString();
if (!stderrStr.includes('Permission denied') && !/grep:.*: Is a directory/i.test(stderrStr)) {
if (
!stderrStr.includes('Permission denied') &&
!/grep:.*: Is a directory/i.test(stderrStr)
) {
stderrChunks.push(chunk);
}
});

child.on('error', (err) => reject(new Error(`Failed to start system grep: ${err.message}`)));
child.on('error', (err) =>
reject(new Error(`Failed to start system grep: ${err.message}`)),
);

child.on('close', (code) => {
const stdoutData = Buffer.concat(stdoutChunks).toString('utf8');
const stderrData = Buffer.concat(stderrChunks).toString('utf8').trim();
const stderrData = Buffer.concat(stderrChunks)
.toString('utf8')
.trim();
if (code === 0) resolve(stdoutData);
else if (code === 1) resolve(''); // No matches
else if (code === 1)
resolve(''); // No matches
else {
if (stderrData) reject(new Error(`System grep exited with code ${code}: ${stderrData}`));
if (stderrData)
reject(
new Error(
`System grep exited with code ${code}: ${stderrData}`,
),
);
else resolve('');
}
});
});
return this.parseGrepOutput(output, absolutePath);
} catch (grepError: any) {
console.error(`GrepTool: System grep strategy failed: ${grepError.message}. Falling back...`);
console.error(
`GrepTool: System grep strategy failed: ${grepError.message}. Falling back...`,
);
}
}

// --- Strategy 3: Pure JavaScript Fallback ---
strategyUsed = 'javascript fallback';
const globPattern = include ? include : '**/*';
const ignorePatterns = ['.git', 'node_modules', 'bower_components', '.svn', '.hg'];
const ignorePatterns = [
'.git',
'node_modules',
'bower_components',
'.svn',
'.hg',
];

const filesStream = fastGlob.stream(globPattern, {
cwd: absolutePath,

@@ -469,7 +551,9 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
lines.forEach((line, index) => {
if (regex.test(line)) {
allMatches.push({
filePath: path.relative(absolutePath, fileAbsolutePath) || path.basename(fileAbsolutePath),
filePath:
path.relative(absolutePath, fileAbsolutePath) ||
path.basename(fileAbsolutePath),
lineNumber: index + 1,
line: line,
});

@@ -477,16 +561,19 @@ export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
});
} catch (readError: any) {
if (readError.code !== 'ENOENT') {
console.error(`GrepTool: Could not read or process file ${fileAbsolutePath}: ${readError.message}`);
console.error(
`GrepTool: Could not read or process file ${fileAbsolutePath}: ${readError.message}`,
);
}
}
}

return allMatches;

} catch (error: any) {
console.error(`GrepTool: Error during performGrepSearch (Strategy: ${strategyUsed}): ${error.message}`);
console.error(
`GrepTool: Error during performGrepSearch (Strategy: ${strategyUsed}): ${error.message}`,
);
throw error; // Re-throw to be caught by the execute method's handler
}
}
}
}
|
|
|
@ -91,20 +91,21 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
{
|
||||
properties: {
|
||||
path: {
|
||||
description: 'The absolute path to the directory to list (must be absolute, not relative)',
|
||||
type: 'string'
|
||||
description:
|
||||
'The absolute path to the directory to list (must be absolute, not relative)',
|
||||
type: 'string',
|
||||
},
|
||||
ignore: {
|
||||
description: 'List of glob patterns to ignore',
|
||||
items: {
|
||||
type: 'string'
|
||||
type: 'string',
|
||||
},
|
||||
type: 'array'
|
||||
}
|
||||
type: 'array',
|
||||
},
|
||||
},
|
||||
required: ['path'],
|
||||
type: 'object'
|
||||
}
|
||||
type: 'object',
|
||||
},
|
||||
);
|
||||
|
||||
// Set the root directory
|
||||
|
@ -123,7 +124,10 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
const rootWithSep = normalizedRoot.endsWith(path.sep)
|
||||
? normalizedRoot
|
||||
: normalizedRoot + path.sep;
|
||||
return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
|
||||
return (
|
||||
normalizedPath === normalizedRoot ||
|
||||
normalizedPath.startsWith(rootWithSep)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -132,8 +136,14 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
* @returns An error message string if invalid, null otherwise
|
||||
*/
|
||||
invalidParams(params: LSToolParams): string | null {
|
||||
if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
|
||||
return "Parameters failed schema validation.";
|
||||
if (
|
||||
this.schema.parameters &&
|
||||
!SchemaValidator.validate(
|
||||
this.schema.parameters as Record<string, unknown>,
|
||||
params,
|
||||
)
|
||||
) {
|
||||
return 'Parameters failed schema validation.';
|
||||
}
|
||||
// Ensure path is absolute
|
||||
if (!path.isAbsolute(params.path)) {
|
||||
|
@ -194,7 +204,7 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
listedPath: params.path,
|
||||
totalEntries: 0,
|
||||
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
|
||||
returnDisplay: "**Error:** Failed to execute tool."
|
||||
returnDisplay: '**Error:** Failed to execute tool.',
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -206,7 +216,7 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
listedPath: params.path,
|
||||
totalEntries: 0,
|
||||
llmContent: `Directory does not exist: ${params.path}`,
|
||||
returnDisplay: `Directory does not exist`
|
||||
returnDisplay: `Directory does not exist`,
|
||||
};
|
||||
}
|
||||
// Check if path is a directory
|
||||
|
@ -217,7 +227,7 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
listedPath: params.path,
|
||||
totalEntries: 0,
|
||||
llmContent: `Path is not a directory: ${params.path}`,
|
||||
returnDisplay: `Path is not a directory`
|
||||
returnDisplay: `Path is not a directory`,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -230,7 +240,7 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
listedPath: params.path,
|
||||
totalEntries: 0,
|
||||
llmContent: `Directory is empty: ${params.path}`,
|
||||
returnDisplay: `Directory is empty.`
|
||||
returnDisplay: `Directory is empty.`,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -248,7 +258,7 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
path: fullPath,
|
||||
isDirectory: isDir,
|
||||
size: isDir ? 0 : stats.size,
|
||||
modifiedTime: stats.mtime
|
||||
modifiedTime: stats.mtime,
|
||||
});
|
||||
} catch (error) {
|
||||
// Skip entries that can't be accessed
|
||||
|
@ -264,18 +274,20 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
});
|
||||
|
||||
// Create formatted content for display
|
||||
const directoryContent = entries.map(entry => {
|
||||
const typeIndicator = entry.isDirectory ? 'd' : '-';
|
||||
const sizeInfo = entry.isDirectory ? '' : ` (${entry.size} bytes)`;
|
||||
return `${typeIndicator} ${entry.name}${sizeInfo}`;
|
||||
}).join('\n');
|
||||
|
||||
const directoryContent = entries
|
||||
.map((entry) => {
|
||||
const typeIndicator = entry.isDirectory ? 'd' : '-';
|
||||
const sizeInfo = entry.isDirectory ? '' : ` (${entry.size} bytes)`;
|
||||
return `${typeIndicator} ${entry.name}${sizeInfo}`;
|
||||
})
|
||||
.join('\n');
|
||||
|
||||
return {
|
||||
entries,
|
||||
listedPath: params.path,
|
||||
totalEntries: entries.length,
|
||||
llmContent: `Directory listing for ${params.path}:\n${directoryContent}`,
|
||||
returnDisplay: `Found ${entries.length} item(s).`
|
||||
returnDisplay: `Found ${entries.length} item(s).`,
|
||||
};
|
||||
} catch (error) {
|
||||
const errorMessage = `Error listing directory: ${error instanceof Error ? error.message : String(error)}`;
|
||||
|
@ -284,8 +296,8 @@ export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
|
|||
listedPath: params.path,
|
||||
totalEntries: 0,
|
||||
llmContent: errorMessage,
|
||||
returnDisplay: `**Error:** ${errorMessage}`
|
||||
returnDisplay: `**Error:** ${errorMessage}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,13 +27,15 @@ export interface ReadFileToolParams {
|
|||
/**
|
||||
* Standardized result from the ReadFile tool
|
||||
*/
|
||||
export interface ReadFileToolResult extends ToolResult {
|
||||
}
|
||||
export interface ReadFileToolResult extends ToolResult {}
|
||||
|
||||
/**
|
||||
* Implementation of the ReadFile tool that reads files from the filesystem
|
||||
*/
|
||||
export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResult> {
|
||||
export class ReadFileTool extends BaseTool<
|
||||
ReadFileToolParams,
|
||||
ReadFileToolResult
|
||||
> {
|
||||
public static readonly Name: string = 'read_file';
|
||||
|
||||
// Maximum number of lines to read by default
|
||||
|
@ -60,21 +62,24 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
{
|
||||
properties: {
|
||||
file_path: {
|
||||
description: 'The absolute path to the file to read (e.g., \'/home/user/project/file.txt\'). Relative paths are not supported.',
|
||||
type: 'string'
|
||||
description:
|
||||
"The absolute path to the file to read (e.g., '/home/user/project/file.txt'). Relative paths are not supported.",
|
||||
type: 'string',
|
||||
},
|
||||
offset: {
|
||||
description: 'Optional: The 0-based line number to start reading from. Requires \'limit\' to be set. Use for paginating through large files.',
|
||||
type: 'number'
|
||||
description:
|
||||
"Optional: The 0-based line number to start reading from. Requires 'limit' to be set. Use for paginating through large files.",
|
||||
type: 'number',
|
||||
},
|
||||
limit: {
|
||||
description: 'Optional: Maximum number of lines to read. Use with \'offset\' to paginate through large files. If omitted, reads the entire file (if feasible).',
|
||||
type: 'number'
|
||||
}
|
||||
description:
|
||||
"Optional: Maximum number of lines to read. Use with 'offset' to paginate through large files. If omitted, reads the entire file (if feasible).",
|
||||
type: 'number',
|
||||
},
|
||||
},
|
||||
required: ['file_path'],
|
||||
type: 'object'
|
||||
}
|
||||
type: 'object',
|
||||
},
|
||||
);
|
||||
|
||||
// Set the root directory
|
||||
|
@ -95,7 +100,10 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
? normalizedRoot
|
||||
: normalizedRoot + path.sep;
|
||||
|
||||
return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
|
||||
return (
|
||||
normalizedPath === normalizedRoot ||
|
||||
normalizedPath.startsWith(rootWithSep)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -104,8 +112,14 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
* @returns True if parameters are valid, false otherwise
|
||||
*/
|
||||
invalidParams(params: ReadFileToolParams): string | null {
|
||||
if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
|
||||
return "Parameters failed schema validation.";
|
||||
if (
|
||||
this.schema.parameters &&
|
||||
!SchemaValidator.validate(
|
||||
this.schema.parameters as Record<string, unknown>,
|
||||
params,
|
||||
)
|
||||
) {
|
||||
return 'Parameters failed schema validation.';
|
||||
}
|
||||
const filePath = params.file_path;
|
||||
if (!path.isAbsolute(filePath)) {
|
||||
|
@ -151,7 +165,7 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
}
|
||||
|
||||
// If more than 30% are non-printable, likely binary
|
||||
return (nonPrintableCount / bytesRead) > 0.3;
|
||||
return nonPrintableCount / bytesRead > 0.3;
|
||||
} catch (error) {
|
||||
return false;
|
||||
}
|
||||
|
@ -166,7 +180,9 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
const ext = path.extname(filePath).toLowerCase();
|
||||
|
||||
// Common image formats
|
||||
if (['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg'].includes(ext)) {
|
||||
if (
|
||||
['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg'].includes(ext)
|
||||
) {
|
||||
return 'image';
|
||||
}
|
||||
|
||||
|
@ -189,8 +205,8 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
* @returns A string describing the file being read
|
||||
*/
|
||||
getDescription(params: ReadFileToolParams): string {
|
||||
const relativePath = makeRelative(params.file_path, this.rootDirectory);
|
||||
return shortenPath(relativePath);
|
||||
const relativePath = makeRelative(params.file_path, this.rootDirectory);
|
||||
return shortenPath(relativePath);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -204,7 +220,7 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
if (validationError) {
|
||||
return {
|
||||
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
|
||||
returnDisplay: "**Error:** Failed to execute tool."
|
||||
returnDisplay: '**Error:** Failed to execute tool.',
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -245,14 +261,15 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
const formattedLines = selectedLines.map((line) => {
|
||||
let processedLine = line;
|
||||
if (line.length > ReadFileTool.MAX_LINE_LENGTH) {
|
||||
processedLine = line.substring(0, ReadFileTool.MAX_LINE_LENGTH) + '... [truncated]';
|
||||
processedLine =
|
||||
line.substring(0, ReadFileTool.MAX_LINE_LENGTH) + '... [truncated]';
|
||||
truncated = true;
|
||||
}
|
||||
|
||||
return processedLine;
|
||||
});
|
||||
|
||||
const contentTruncated = (endLine < lines.length) || truncated;
|
||||
const contentTruncated = endLine < lines.length || truncated;
|
||||
|
||||
let llmContent = '';
|
||||
if (contentTruncated) {
|
||||
|
@ -273,4 +290,4 @@ export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResul
|
|||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -2,56 +2,58 @@ import { ToolListUnion, FunctionDeclaration } from '@google/genai';
|
|||
import { Tool } from './tools.js';
|
||||
|
||||
class ToolRegistry {
|
||||
private tools: Map<string, Tool> = new Map();
|
||||
private tools: Map<string, Tool> = new Map();
|
||||
|
||||
/**
|
||||
* Registers a tool definition.
|
||||
* @param tool - The tool object containing schema and execution logic.
|
||||
*/
|
||||
registerTool(tool: Tool): void {
|
||||
if (this.tools.has(tool.name)) {
|
||||
// Decide on behavior: throw error, log warning, or allow overwrite
|
||||
console.warn(`Tool with name "${tool.name}" is already registered. Overwriting.`);
|
||||
}
|
||||
this.tools.set(tool.name, tool);
|
||||
/**
|
||||
* Registers a tool definition.
|
||||
* @param tool - The tool object containing schema and execution logic.
|
||||
*/
|
||||
registerTool(tool: Tool): void {
|
||||
if (this.tools.has(tool.name)) {
|
||||
// Decide on behavior: throw error, log warning, or allow overwrite
|
||||
console.warn(
|
||||
`Tool with name "${tool.name}" is already registered. Overwriting.`,
|
||||
);
|
||||
}
|
||||
this.tools.set(tool.name, tool);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the list of tool schemas in the format required by Gemini.
|
||||
* @returns A ToolListUnion containing the function declarations.
|
||||
*/
|
||||
getToolSchemas(): ToolListUnion {
|
||||
const declarations: FunctionDeclaration[] = [];
|
||||
this.tools.forEach(tool => {
|
||||
declarations.push(tool.schema);
|
||||
});
|
||||
/**
|
||||
* Retrieves the list of tool schemas in the format required by Gemini.
|
||||
* @returns A ToolListUnion containing the function declarations.
|
||||
*/
|
||||
getToolSchemas(): ToolListUnion {
|
||||
const declarations: FunctionDeclaration[] = [];
|
||||
this.tools.forEach((tool) => {
|
||||
declarations.push(tool.schema);
|
||||
});
|
||||
|
||||
// Return Gemini's expected format. Handle the case of no tools.
|
||||
if (declarations.length === 0) {
|
||||
// Depending on the SDK version, you might need `undefined`, `[]`, or `[{ functionDeclarations: [] }]`
|
||||
// Check the documentation for your @google/genai version.
|
||||
// Let's assume an empty array works or signifies no tools.
|
||||
return [];
|
||||
// Or if it requires the structure:
|
||||
// return [{ functionDeclarations: [] }];
|
||||
}
|
||||
return [{ functionDeclarations: declarations }];
|
||||
// Return Gemini's expected format. Handle the case of no tools.
|
||||
if (declarations.length === 0) {
|
||||
// Depending on the SDK version, you might need `undefined`, `[]`, or `[{ functionDeclarations: [] }]`
|
||||
// Check the documentation for your @google/genai version.
|
||||
// Let's assume an empty array works or signifies no tools.
|
||||
return [];
|
||||
// Or if it requires the structure:
|
||||
// return [{ functionDeclarations: [] }];
|
||||
}
|
||||
return [{ functionDeclarations: declarations }];
|
||||
}
|
||||
|
||||
/**
|
||||
* Optional: Get a list of registered tool names.
|
||||
*/
|
||||
listAvailableTools(): string[] {
|
||||
return Array.from(this.tools.keys());
|
||||
}
|
||||
/**
|
||||
* Optional: Get a list of registered tool names.
|
||||
*/
|
||||
listAvailableTools(): string[] {
|
||||
return Array.from(this.tools.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the definition of a specific tool.
|
||||
*/
|
||||
getTool(name: string): Tool | undefined {
|
||||
return this.tools.get(name);
|
||||
}
|
||||
/**
|
||||
* Get the definition of a specific tool.
|
||||
*/
|
||||
getTool(name: string): Tool | undefined {
|
||||
return this.tools.get(name);
|
||||
}
|
||||
}
|
||||
|
||||
// Export a singleton instance of the registry
|
||||
export const toolRegistry = new ToolRegistry();
|
||||
export const toolRegistry = new ToolRegistry();
|
||||
|
|
|
@ -1,10 +1,13 @@
|
|||
import { FunctionDeclaration, Schema } from "@google/genai";
|
||||
import { ToolCallConfirmationDetails } from "../ui/types.js";
|
||||
import { FunctionDeclaration, Schema } from '@google/genai';
|
||||
import { ToolCallConfirmationDetails } from '../ui/types.js';
|
||||
|
||||
/**
|
||||
* Interface representing the base Tool functionality
|
||||
*/
|
||||
export interface Tool<TParams = unknown, TResult extends ToolResult = ToolResult> {
|
||||
export interface Tool<
|
||||
TParams = unknown,
|
||||
TResult extends ToolResult = ToolResult,
|
||||
> {
|
||||
/**
|
||||
* The internal name of the tool (used for API calls)
|
||||
*/
|
||||
|
@ -45,7 +48,9 @@ export interface Tool<TParams = unknown, TResult extends ToolResult = ToolResult
|
|||
* @param params Parameters for the tool execution
|
||||
* @returns Whether execute should be confirmed.
|
||||
*/
|
||||
shouldConfirmExecute(params: TParams): Promise<ToolCallConfirmationDetails | false>;
|
||||
shouldConfirmExecute(
|
||||
params: TParams,
|
||||
): Promise<ToolCallConfirmationDetails | false>;
|
||||
|
||||
/**
|
||||
* Executes the tool with the given parameters
|
||||
|
@ -55,11 +60,14 @@ export interface Tool<TParams = unknown, TResult extends ToolResult = ToolResult
|
|||
execute(params: TParams): Promise<TResult>;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Base implementation for tools with common functionality
|
||||
*/
|
||||
export abstract class BaseTool<TParams = unknown, TResult extends ToolResult = ToolResult> implements Tool<TParams, TResult> {
|
||||
export abstract class BaseTool<
|
||||
TParams = unknown,
|
||||
TResult extends ToolResult = ToolResult,
|
||||
> implements Tool<TParams, TResult>
|
||||
{
|
||||
/**
|
||||
* Creates a new instance of BaseTool
|
||||
* @param name Internal name of the tool (used for API calls)
|
||||
|
@ -71,7 +79,7 @@ export abstract class BaseTool<TParams = unknown, TResult extends ToolResult = T
|
|||
public readonly name: string,
|
||||
public readonly displayName: string,
|
||||
public readonly description: string,
|
||||
public readonly parameterSchema: Record<string, unknown>
|
||||
public readonly parameterSchema: Record<string, unknown>,
|
||||
) {}
|
||||
|
||||
/**
|
||||
|
@ -81,7 +89,7 @@ export abstract class BaseTool<TParams = unknown, TResult extends ToolResult = T
|
|||
return {
|
||||
name: this.name,
|
||||
description: this.description,
|
||||
parameters: this.parameterSchema as Schema
|
||||
parameters: this.parameterSchema as Schema,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -112,7 +120,9 @@ export abstract class BaseTool<TParams = unknown, TResult extends ToolResult = T
|
|||
* @param params Parameters for the tool execution
|
||||
* @returns Whether or not execute should be confirmed by the user.
|
||||
*/
|
||||
shouldConfirmExecute(params: TParams): Promise<ToolCallConfirmationDetails | false> {
|
||||
shouldConfirmExecute(
|
||||
params: TParams,
|
||||
): Promise<ToolCallConfirmationDetails | false> {
|
||||
return Promise.resolve(false);
|
||||
}
|
||||
|
||||
|
@ -125,7 +135,6 @@ export abstract class BaseTool<TParams = unknown, TResult extends ToolResult = T
|
|||
abstract execute(params: TParams): Promise<TResult>;
|
||||
}
|
||||
|
||||
|
||||
export interface ToolResult {
|
||||
/**
|
||||
* Content meant to be included in LLM history.
|
||||
|
@ -143,5 +152,5 @@ export interface ToolResult {
|
|||
export type ToolResultDisplay = string | FileDiff;
|
||||
|
||||
export interface FileDiff {
|
||||
fileDiff: string
|
||||
fileDiff: string;
|
||||
}
|
||||
|
|
|
@ -3,7 +3,11 @@ import path from 'path';
|
|||
import { BaseTool, ToolResult } from './tools.js';
|
||||
import { SchemaValidator } from '../utils/schemaValidator.js';
|
||||
import { makeRelative, shortenPath } from '../utils/paths.js';
|
||||
import { ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolEditConfirmationDetails } from '../ui/types.js';
|
||||
import {
|
||||
ToolCallConfirmationDetails,
|
||||
ToolConfirmationOutcome,
|
||||
ToolEditConfirmationDetails,
|
||||
} from '../ui/types.js';
|
||||
import * as Diff from 'diff';
|
||||
|
||||
/**
|
||||
|
@ -24,13 +28,15 @@ export interface WriteFileToolParams {
|
|||
/**
|
||||
* Standardized result from the WriteFile tool
|
||||
*/
|
||||
export interface WriteFileToolResult extends ToolResult {
|
||||
}
|
||||
export interface WriteFileToolResult extends ToolResult {}
|
||||
|
||||
/**
|
||||
* Implementation of the WriteFile tool that writes files to the filesystem
|
||||
*/
|
||||
export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolResult> {
|
||||
export class WriteFileTool extends BaseTool<
|
||||
WriteFileToolParams,
|
||||
WriteFileToolResult
|
||||
> {
|
||||
public static readonly Name: string = 'write_file';
|
||||
private shouldAlwaysWrite = false;
|
||||
|
||||
|
@ -52,17 +58,18 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolRe
|
|||
{
|
||||
properties: {
|
||||
filePath: {
|
||||
description: 'The absolute path to the file to write to (e.g., \'/home/user/project/file.txt\'). Relative paths are not supported.',
|
||||
type: 'string'
|
||||
description:
|
||||
"The absolute path to the file to write to (e.g., '/home/user/project/file.txt'). Relative paths are not supported.",
|
||||
type: 'string',
|
||||
},
|
||||
content: {
|
||||
description: 'The content to write to the file.',
|
||||
type: 'string'
|
||||
}
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['filePath', 'content'],
|
||||
type: 'object'
|
||||
}
|
||||
type: 'object',
|
||||
},
|
||||
);
|
||||
|
||||
// Set the root directory
|
||||
|
@ -83,7 +90,10 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolRe
|
|||
? normalizedRoot
|
||||
: normalizedRoot + path.sep;
|
||||
|
||||
return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
|
||||
return (
|
||||
normalizedPath === normalizedRoot ||
|
||||
normalizedPath.startsWith(rootWithSep)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -92,7 +102,13 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolRe
|
|||
* @returns True if parameters are valid, false otherwise
|
||||
*/
|
||||
invalidParams(params: WriteFileToolParams): string | null {
|
||||
if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
|
||||
if (
|
||||
this.schema.parameters &&
|
||||
!SchemaValidator.validate(
|
||||
this.schema.parameters as Record<string, unknown>,
|
||||
params,
|
||||
)
|
||||
) {
|
||||
return 'Parameters failed schema validation.';
|
||||
}
|
||||
|
||||
|
@ -114,7 +130,9 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolRe
|
|||
* @param params Parameters for the tool execution
|
||||
* @returns Whether or not execute should be confirmed by the user.
|
||||
*/
|
||||
async shouldConfirmExecute(params: WriteFileToolParams): Promise<ToolCallConfirmationDetails | false> {
|
||||
async shouldConfirmExecute(
|
||||
params: WriteFileToolParams,
|
||||
): Promise<ToolCallConfirmationDetails | false> {
|
||||
if (this.shouldAlwaysWrite) {
|
||||
return false;
|
||||
}
|
||||
|
@ -135,7 +153,7 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolRe
|
|||
params.content,
|
||||
'Current',
|
||||
'Proposed',
|
||||
{ context: 3, ignoreWhitespace: true}
|
||||
{ context: 3, ignoreWhitespace: true },
|
||||
);
|
||||
|
||||
const confirmationDetails: ToolEditConfirmationDetails = {
|
||||
|
@ -171,7 +189,7 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolRe
|
|||
if (validationError) {
|
||||
return {
|
||||
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
|
||||
returnDisplay: '**Error:** Failed to execute tool.'
|
||||
returnDisplay: '**Error:** Failed to execute tool.',
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -187,13 +205,13 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolRe
|
|||
|
||||
return {
|
||||
llmContent: `Successfully wrote to file: ${params.file_path}`,
|
||||
returnDisplay: `Wrote to ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`
|
||||
returnDisplay: `Wrote to ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
|
||||
};
|
||||
} catch (error) {
|
||||
const errorMsg = `Error writing to file: ${error instanceof Error ? error.message : String(error)}`;
|
||||
return {
|
||||
llmContent: `Error writing to file ${params.file_path}: ${errorMsg}`,
|
||||
returnDisplay: `Failed to write to file: ${errorMsg}`
|
||||
returnDisplay: `Failed to write to file: ${errorMsg}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
@ -13,78 +13,111 @@ import { StreamingState } from '../core/gemini-stream.js';
|
|||
import { PartListUnion } from '@google/genai';
|
||||
|
||||
interface AppProps {
|
||||
directory: string;
|
||||
directory: string;
|
||||
}
|
||||
|
||||
const App = ({ directory }: AppProps) => {
|
||||
const [query, setQuery] = useState('');
|
||||
const [history, setHistory] = useState<HistoryItem[]>([]);
|
||||
const { streamingState, submitQuery, initError } = useGeminiStream(setHistory);
|
||||
const { elapsedTime, currentLoadingPhrase } = useLoadingIndicator(streamingState);
|
||||
const [query, setQuery] = useState('');
|
||||
const [history, setHistory] = useState<HistoryItem[]>([]);
|
||||
const { streamingState, submitQuery, initError } =
|
||||
useGeminiStream(setHistory);
|
||||
const { elapsedTime, currentLoadingPhrase } =
|
||||
useLoadingIndicator(streamingState);
|
||||
|
||||
const handleInputSubmit = (value: PartListUnion) => {
|
||||
submitQuery(value).then(() => {
|
||||
setQuery('');
|
||||
}).catch(() => {
|
||||
setQuery('');
|
||||
});
|
||||
};
|
||||
const handleInputSubmit = (value: PartListUnion) => {
|
||||
submitQuery(value)
|
||||
.then(() => {
|
||||
setQuery('');
|
||||
})
|
||||
.catch(() => {
|
||||
setQuery('');
|
||||
});
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (initError && !history.some(item => item.type === 'error' && item.text?.includes(initError))) {
|
||||
setHistory(prev => [
|
||||
...prev,
|
||||
{ id: Date.now(), type: 'error', text: `Initialization Error: ${initError}. Please check API key and configuration.` } as HistoryItem
|
||||
]);
|
||||
}
|
||||
}, [initError, history]);
|
||||
useEffect(() => {
|
||||
if (
|
||||
initError &&
|
||||
!history.some(
|
||||
(item) => item.type === 'error' && item.text?.includes(initError),
|
||||
)
|
||||
) {
|
||||
setHistory((prev) => [
|
||||
...prev,
|
||||
{
|
||||
id: Date.now(),
|
||||
type: 'error',
|
||||
text: `Initialization Error: ${initError}. Please check API key and configuration.`,
|
||||
} as HistoryItem,
|
||||
]);
|
||||
}
|
||||
}, [initError, history]);
|
||||
|
||||
const isWaitingForToolConfirmation = history.some(item =>
|
||||
item.type === 'tool_group' && item.tools.some(tool => tool.confirmationDetails !== undefined)
|
||||
);
|
||||
const isInputActive = streamingState === StreamingState.Idle && !initError;
|
||||
const isWaitingForToolConfirmation = history.some(
|
||||
(item) =>
|
||||
item.type === 'tool_group' &&
|
||||
item.tools.some((tool) => tool.confirmationDetails !== undefined),
|
||||
);
|
||||
const isInputActive = streamingState === StreamingState.Idle && !initError;
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" padding={1} marginBottom={1} width="100%">
|
||||
<Header cwd={directory} />
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" padding={1} marginBottom={1} width="100%">
|
||||
<Header cwd={directory} />
|
||||
<Tips />
|
||||
|
||||
<Tips />
|
||||
|
||||
{initError && streamingState !== StreamingState.Responding && !isWaitingForToolConfirmation && (
|
||||
<Box borderStyle="round" borderColor="red" paddingX={1} marginBottom={1}>
|
||||
{history.find(item => item.type === 'error' && item.text?.includes(initError))?.text ? (
|
||||
<Text color="red">{history.find(item => item.type === 'error' && item.text?.includes(initError))?.text}</Text>
|
||||
) : (
|
||||
<>
|
||||
<Text color="red">Initialization Error: {initError}</Text>
|
||||
<Text color="red"> Please check API key and configuration.</Text>
|
||||
</>
|
||||
)}
|
||||
</Box>
|
||||
{initError &&
|
||||
streamingState !== StreamingState.Responding &&
|
||||
!isWaitingForToolConfirmation && (
|
||||
<Box
|
||||
borderStyle="round"
|
||||
borderColor="red"
|
||||
paddingX={1}
|
||||
marginBottom={1}
|
||||
>
|
||||
{history.find(
|
||||
(item) => item.type === 'error' && item.text?.includes(initError),
|
||||
)?.text ? (
|
||||
<Text color="red">
|
||||
{
|
||||
history.find(
|
||||
(item) =>
|
||||
item.type === 'error' && item.text?.includes(initError),
|
||||
)?.text
|
||||
}
|
||||
</Text>
|
||||
) : (
|
||||
<>
|
||||
<Text color="red">Initialization Error: {initError}</Text>
|
||||
<Text color="red">
|
||||
{' '}
|
||||
Please check API key and configuration.
|
||||
</Text>
|
||||
</>
|
||||
)}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
<Box flexDirection="column">
|
||||
<HistoryDisplay history={history} onSubmit={handleInputSubmit} />
|
||||
<LoadingIndicator
|
||||
isLoading={streamingState === StreamingState.Responding}
|
||||
currentLoadingPhrase={currentLoadingPhrase}
|
||||
elapsedTime={elapsedTime}
|
||||
/>
|
||||
</Box>
|
||||
<Box flexDirection="column">
|
||||
<HistoryDisplay history={history} onSubmit={handleInputSubmit} />
|
||||
<LoadingIndicator
|
||||
isLoading={streamingState === StreamingState.Responding}
|
||||
currentLoadingPhrase={currentLoadingPhrase}
|
||||
elapsedTime={elapsedTime}
|
||||
/>
|
||||
</Box>
|
||||
|
||||
{!isWaitingForToolConfirmation && isInputActive && (
|
||||
<InputPrompt
|
||||
query={query}
|
||||
setQuery={setQuery}
|
||||
onSubmit={handleInputSubmit}
|
||||
isActive={isInputActive}
|
||||
/>
|
||||
)}
|
||||
{!isWaitingForToolConfirmation && isInputActive && (
|
||||
<InputPrompt
|
||||
query={query}
|
||||
setQuery={setQuery}
|
||||
onSubmit={handleInputSubmit}
|
||||
isActive={isInputActive}
|
||||
/>
|
||||
)}
|
||||
|
||||
<Footer queryLength={query.length} />
|
||||
</Box>
|
||||
);
|
||||
<Footer queryLength={query.length} />
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default App;
|
||||
export default App;
|
||||
|
|
|
@@ -2,20 +2,18 @@ import React from 'react';
import { Box, Text } from 'ink';

interface FooterProps {
queryLength: number;
queryLength: number;
}

const Footer: React.FC<FooterProps> = ({ queryLength }) => {
return (
<Box marginTop={1} justifyContent="space-between">
<Box minWidth={15}>
<Text color="gray">
{queryLength === 0 ? "? for shortcuts" : ""}
</Text>
</Box>
<Text color="blue">Gemini</Text>
</Box>
);
return (
<Box marginTop={1} justifyContent="space-between">
<Box minWidth={15}>
<Text color="gray">{queryLength === 0 ? '? for shortcuts' : ''}</Text>
</Box>
<Text color="blue">Gemini</Text>
</Box>
);
};

export default Footer;
export default Footer;
|
|
|
@ -4,35 +4,37 @@ import { UI_WIDTH, BOX_PADDING_X } from '../constants.js';
|
|||
import { shortenPath } from '../../utils/paths.js';
|
||||
|
||||
interface HeaderProps {
|
||||
cwd: string;
|
||||
cwd: string;
|
||||
}
|
||||
|
||||
const Header: React.FC<HeaderProps> = ({ cwd }) => {
|
||||
return (
|
||||
<>
|
||||
{/* Static Header Art */}
|
||||
<Box marginBottom={1}>
|
||||
<Text color="blue">{`
|
||||
return (
|
||||
<>
|
||||
{/* Static Header Art */}
|
||||
<Box marginBottom={1}>
|
||||
<Text color="blue">{`
|
||||
______ ________ ____ ____ _____ ____ _____ _____
|
||||
.' ___ ||_ __ ||_ \\ / _||_ _||_ \\|_ _||_ _|
|
||||
/ .' \\_| | |_ \\_| | \\/ | | | | \\ | | | |
|
||||
| | ____ | _| _ | |\\ /| | | | | |\\ \\| | | |
|
||||
\\ \`.___] |_| |__/ | _| |_\\/_| |_ _| |_ _| |_\\ |_ _| |_
|
||||
\`._____.'|________||_____||_____||_____||_____|\\____||_____|`}</Text>
|
||||
</Box>
|
||||
{/* CWD Display */}
|
||||
<Box
|
||||
borderStyle="round"
|
||||
borderColor="blue"
|
||||
paddingX={BOX_PADDING_X}
|
||||
flexDirection="column"
|
||||
marginBottom={1}
|
||||
width={UI_WIDTH}
|
||||
>
|
||||
<Box paddingLeft={2}><Text color="gray">cwd: {shortenPath(cwd, /*maxLength*/ 70)}</Text></Box>
|
||||
</Box>
|
||||
</>
|
||||
);
|
||||
</Box>
|
||||
{/* CWD Display */}
|
||||
<Box
|
||||
borderStyle="round"
|
||||
borderColor="blue"
|
||||
paddingX={BOX_PADDING_X}
|
||||
flexDirection="column"
|
||||
marginBottom={1}
|
||||
width={UI_WIDTH}
|
||||
>
|
||||
<Box paddingLeft={2}>
|
||||
<Text color="gray">cwd: {shortenPath(cwd, /*maxLength*/ 70)}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default Header;
|
||||
export default Header;
|
||||
|
|
|
@ -10,30 +10,33 @@ import ToolGroupMessage from './messages/ToolGroupMessage.js';
|
|||
import { PartListUnion } from '@google/genai';
|
||||
|
||||
interface HistoryDisplayProps {
|
||||
history: HistoryItem[];
|
||||
onSubmit: (value: PartListUnion) => void;
|
||||
history: HistoryItem[];
|
||||
onSubmit: (value: PartListUnion) => void;
|
||||
}
|
||||
|
||||
const HistoryDisplay: React.FC<HistoryDisplayProps> = ({ history, onSubmit }) => {
|
||||
// No grouping logic needed here anymore
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
{history.map((item) => (
|
||||
<Box key={item.id} marginBottom={1}>
|
||||
{/* Render standard message types */}
|
||||
{item.type === 'user' && <UserMessage text={item.text} />}
|
||||
{item.type === 'gemini' && <GeminiMessage text={item.text} />}
|
||||
{item.type === 'info' && <InfoMessage text={item.text} />}
|
||||
{item.type === 'error' && <ErrorMessage text={item.text} />}
|
||||
const HistoryDisplay: React.FC<HistoryDisplayProps> = ({
|
||||
history,
|
||||
onSubmit,
|
||||
}) => {
|
||||
// No grouping logic needed here anymore
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
{history.map((item) => (
|
||||
<Box key={item.id} marginBottom={1}>
|
||||
{/* Render standard message types */}
|
||||
{item.type === 'user' && <UserMessage text={item.text} />}
|
||||
{item.type === 'gemini' && <GeminiMessage text={item.text} />}
|
||||
{item.type === 'info' && <InfoMessage text={item.text} />}
|
||||
{item.type === 'error' && <ErrorMessage text={item.text} />}
|
||||
|
||||
{/* Render the tool group component */}
|
||||
{item.type === 'tool_group' && (
|
||||
<ToolGroupMessage toolCalls={item.tools} onSubmit={onSubmit} />
|
||||
)}
|
||||
</Box>
|
||||
))}
|
||||
{/* Render the tool group component */}
|
||||
{item.type === 'tool_group' && (
|
||||
<ToolGroupMessage toolCalls={item.tools} onSubmit={onSubmit} />
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
))}
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default HistoryDisplay;
|
||||
export default HistoryDisplay;
|
||||
|
|
|
@ -3,37 +3,32 @@ import { Box, Text } from 'ink';
|
|||
import TextInput from 'ink-text-input';
|
||||
|
||||
interface InputPromptProps {
|
||||
query: string;
|
||||
setQuery: (value: string) => void;
|
||||
onSubmit: (value: string) => void;
|
||||
isActive: boolean;
|
||||
query: string;
|
||||
setQuery: (value: string) => void;
|
||||
onSubmit: (value: string) => void;
|
||||
isActive: boolean;
|
||||
}
|
||||
|
||||
const InputPrompt: React.FC<InputPromptProps> = ({
|
||||
query,
|
||||
setQuery,
|
||||
onSubmit,
|
||||
query,
|
||||
setQuery,
|
||||
onSubmit,
|
||||
}) => {
|
||||
return (
|
||||
<Box
|
||||
marginTop={1}
|
||||
borderStyle="round"
|
||||
borderColor={'white'}
|
||||
paddingX={1}
|
||||
>
|
||||
<Text color={'white'}>> </Text>
|
||||
<Box flexGrow={1}>
|
||||
<TextInput
|
||||
value={query}
|
||||
onChange={setQuery}
|
||||
onSubmit={onSubmit}
|
||||
showCursor={true}
|
||||
focus={true}
|
||||
placeholder={'Ask Gemini... (try "/init" or "/help")'}
|
||||
/>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
return (
|
||||
<Box marginTop={1} borderStyle="round" borderColor={'white'} paddingX={1}>
|
||||
<Text color={'white'}>> </Text>
|
||||
<Box flexGrow={1}>
|
||||
<TextInput
|
||||
value={query}
|
||||
onChange={setQuery}
|
||||
onSubmit={onSubmit}
|
||||
showCursor={true}
|
||||
focus={true}
|
||||
placeholder={'Ask Gemini... (try "/init" or "/help")'}
|
||||
/>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default InputPrompt;
|
||||
export default InputPrompt;
|
||||
|
|
|
@ -3,30 +3,32 @@ import { Box, Text } from 'ink';
|
|||
import Spinner from 'ink-spinner';
|
||||
|
||||
interface LoadingIndicatorProps {
|
||||
isLoading: boolean;
|
||||
currentLoadingPhrase: string;
|
||||
elapsedTime: number;
|
||||
isLoading: boolean;
|
||||
currentLoadingPhrase: string;
|
||||
elapsedTime: number;
|
||||
}
|
||||
|
||||
const LoadingIndicator: React.FC<LoadingIndicatorProps> = ({
|
||||
isLoading,
|
||||
currentLoadingPhrase,
|
||||
elapsedTime,
|
||||
isLoading,
|
||||
currentLoadingPhrase,
|
||||
elapsedTime,
|
||||
}) => {
|
||||
if (!isLoading) {
|
||||
return null; // Don't render anything if not loading
|
||||
}
|
||||
if (!isLoading) {
|
||||
return null; // Don't render anything if not loading
|
||||
}
|
||||
|
||||
return (
|
||||
<Box marginTop={1} paddingLeft={0}>
|
||||
<Box marginRight={1}>
|
||||
<Spinner type="dots" />
|
||||
</Box>
|
||||
<Text color="cyan">{currentLoadingPhrase} ({elapsedTime}s)</Text>
|
||||
<Box flexGrow={1}>{/* Spacer */}</Box>
|
||||
<Text color="gray">(ESC to cancel)</Text>
|
||||
</Box>
|
||||
);
|
||||
return (
|
||||
<Box marginTop={1} paddingLeft={0}>
|
||||
<Box marginRight={1}>
|
||||
<Spinner type="dots" />
|
||||
</Box>
|
||||
<Text color="cyan">
|
||||
{currentLoadingPhrase} ({elapsedTime}s)
|
||||
</Text>
|
||||
<Box flexGrow={1}>{/* Spacer */}</Box>
|
||||
<Text color="gray">(ESC to cancel)</Text>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default LoadingIndicator;
|
||||
export default LoadingIndicator;
|
||||
|
|
|
@@ -3,15 +3,20 @@ import { Box, Text } from 'ink';
import { UI_WIDTH } from '../constants.js';

const Tips: React.FC = () => {
return (
<Box flexDirection="column" marginBottom={1} width={UI_WIDTH}>
<Text>Tips for getting started:</Text>
<Text>1. <Text bold>/help</Text> for more information.</Text>
<Text>2. <Text bold>/init</Text> to create a GEMINI.md for instructions & context.</Text>
<Text>3. Ask coding questions, edit code or run commands.</Text>
<Text>4. Be specific for the best results.</Text>
</Box>
);
return (
<Box flexDirection="column" marginBottom={1} width={UI_WIDTH}>
<Text>Tips for getting started:</Text>
<Text>
1. <Text bold>/help</Text> for more information.
</Text>
<Text>
2. <Text bold>/init</Text> to create a GEMINI.md for instructions &
context.
</Text>
<Text>3. Ask coding questions, edit code or run commands.</Text>
<Text>4. Be specific for the best results.</Text>
</Box>
);
};

export default Tips;
export default Tips;
|
|
|
@ -1,5 +1,5 @@
|
|||
import React from 'react';
|
||||
import { Box, Text } from 'ink'
|
||||
import { Box, Text } from 'ink';
|
||||
|
||||
interface DiffLine {
|
||||
type: 'add' | 'del' | 'context' | 'hunk' | 'other';
|
||||
|
@ -30,29 +30,53 @@ function parseDiffWithLineNumbers(diffContent: string): DiffLine[] {
|
|||
continue;
|
||||
}
|
||||
if (!inHunk) {
|
||||
// Skip standard Git header lines more robustly
|
||||
if (line.startsWith('--- ') || line.startsWith('+++ ') || line.startsWith('diff --git') || line.startsWith('index ') || line.startsWith('similarity index') || line.startsWith('rename from') || line.startsWith('rename to') || line.startsWith('new file mode') || line.startsWith('deleted file mode')) continue;
|
||||
// Skip standard Git header lines more robustly
|
||||
if (
|
||||
line.startsWith('--- ') ||
|
||||
line.startsWith('+++ ') ||
|
||||
line.startsWith('diff --git') ||
|
||||
line.startsWith('index ') ||
|
||||
line.startsWith('similarity index') ||
|
||||
line.startsWith('rename from') ||
|
||||
line.startsWith('rename to') ||
|
||||
line.startsWith('new file mode') ||
|
||||
line.startsWith('deleted file mode')
|
||||
)
|
||||
continue;
|
||||
// If it's not a hunk or header, skip (or handle as 'other' if needed)
|
||||
continue;
|
||||
}
|
||||
if (line.startsWith('+')) {
|
||||
currentNewLine++; // Increment before pushing
|
||||
result.push({ type: 'add', newLine: currentNewLine, content: line.substring(1) });
|
||||
result.push({
|
||||
type: 'add',
|
||||
newLine: currentNewLine,
|
||||
content: line.substring(1),
|
||||
});
|
||||
} else if (line.startsWith('-')) {
|
||||
currentOldLine++; // Increment before pushing
|
||||
result.push({ type: 'del', oldLine: currentOldLine, content: line.substring(1) });
|
||||
result.push({
|
||||
type: 'del',
|
||||
oldLine: currentOldLine,
|
||||
content: line.substring(1),
|
||||
});
|
||||
} else if (line.startsWith(' ')) {
|
||||
currentOldLine++; // Increment before pushing
|
||||
currentNewLine++;
|
||||
result.push({ type: 'context', oldLine: currentOldLine, newLine: currentNewLine, content: line.substring(1) });
|
||||
} else if (line.startsWith('\\')) { // Handle "\ No newline at end of file"
|
||||
result.push({
|
||||
type: 'context',
|
||||
oldLine: currentOldLine,
|
||||
newLine: currentNewLine,
|
||||
content: line.substring(1),
|
||||
});
|
||||
} else if (line.startsWith('\\')) {
|
||||
// Handle "\ No newline at end of file"
|
||||
result.push({ type: 'other', content: line });
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
interface DiffRendererProps {
|
||||
diffContent: string;
|
||||
filename?: string;
|
||||
|
@ -61,7 +85,10 @@ interface DiffRendererProps {
|
|||
|
||||
const DEFAULT_TAB_WIDTH = 4; // Spaces per tab for normalization
|
||||
|
||||
const DiffRenderer: React.FC<DiffRendererProps> = ({ diffContent, tabWidth = DEFAULT_TAB_WIDTH }) => {
|
||||
const DiffRenderer: React.FC<DiffRendererProps> = ({
|
||||
diffContent,
|
||||
tabWidth = DEFAULT_TAB_WIDTH,
|
||||
}) => {
|
||||
if (!diffContent || typeof diffContent !== 'string') {
|
||||
return <Text color="yellow">No diff content.</Text>;
|
||||
}
|
||||
|
@ -69,14 +96,15 @@ const DiffRenderer: React.FC<DiffRendererProps> = ({ diffContent, tabWidth = DEF
|
|||
const parsedLines = parseDiffWithLineNumbers(diffContent);
|
||||
|
||||
// 1. Normalize whitespace (replace tabs with spaces) *before* further processing
|
||||
const normalizedLines = parsedLines.map(line => ({
|
||||
const normalizedLines = parsedLines.map((line) => ({
|
||||
...line,
|
||||
content: line.content.replace(/\t/g, ' '.repeat(tabWidth))
|
||||
content: line.content.replace(/\t/g, ' '.repeat(tabWidth)),
|
||||
}));
|
||||
|
||||
// Filter out non-displayable lines (hunks, potentially 'other') using the normalized list
|
||||
const displayableLines = normalizedLines.filter(l => l.type !== 'hunk' && l.type !== 'other');
|
||||
|
||||
const displayableLines = normalizedLines.filter(
|
||||
(l) => l.type !== 'hunk' && l.type !== 'other',
|
||||
);
|
||||
|
||||
if (displayableLines.length === 0) {
|
||||
return (
|
||||
|
@ -93,7 +121,7 @@ const DiffRenderer: React.FC<DiffRendererProps> = ({ diffContent, tabWidth = DEF
|
|||
if (line.content.trim() === '') continue;
|
||||
|
||||
const firstCharIndex = line.content.search(/\S/); // Find index of first non-whitespace char
|
||||
const currentIndent = (firstCharIndex === -1) ? 0 : firstCharIndex; // Indent is 0 if no non-whitespace found
|
||||
const currentIndent = firstCharIndex === -1 ? 0 : firstCharIndex; // Indent is 0 if no non-whitespace found
|
||||
baseIndentation = Math.min(baseIndentation, currentIndent);
|
||||
}
|
||||
// If baseIndentation remained Infinity (e.g., no displayable lines with content), default to 0
|
||||
|
@ -102,7 +130,6 @@ const DiffRenderer: React.FC<DiffRendererProps> = ({ diffContent, tabWidth = DEF
|
|||
}
|
||||
// --- End Modification ---
|
||||
|
||||
|
||||
return (
|
||||
<Box borderStyle="round" borderColor="gray" flexDirection="column">
|
||||
{/* Iterate over the lines that should be displayed (already normalized) */}
|
||||
|
@ -139,9 +166,13 @@ const DiffRenderer: React.FC<DiffRendererProps> = ({ diffContent, tabWidth = DEF
|
|||
return (
|
||||
// Using your original rendering structure
|
||||
<Box key={key} flexDirection="row">
|
||||
<Text color="gray">{gutterNumStr} </Text>
|
||||
<Text color={color} dimColor={dim}>{prefixSymbol} </Text>
|
||||
<Text color={color} dimColor={dim} wrap="wrap">{displayContent}</Text>
|
||||
<Text color="gray">{gutterNumStr} </Text>
|
||||
<Text color={color} dimColor={dim}>
|
||||
{prefixSymbol}{' '}
|
||||
</Text>
|
||||
<Text color={color} dimColor={dim} wrap="wrap">
|
||||
{displayContent}
|
||||
</Text>
|
||||
</Box>
|
||||
);
|
||||
})}
|
||||
|
@ -149,4 +180,4 @@ const DiffRenderer: React.FC<DiffRendererProps> = ({ diffContent, tabWidth = DEF
|
|||
);
|
||||
};
|
||||
|
||||
export default DiffRenderer;
|
||||
export default DiffRenderer;
|
||||
|
|
|
@@ -2,23 +2,25 @@ import React from 'react';
import { Text, Box } from 'ink';

interface ErrorMessageProps {
text: string;
text: string;
}

const ErrorMessage: React.FC<ErrorMessageProps> = ({ text }) => {
const prefix = '✕ ';
const prefixWidth = prefix.length;
const prefix = '✕ ';
const prefixWidth = prefix.length;

return (
<Box flexDirection="row">
<Box width={prefixWidth}>
<Text color="red">{prefix}</Text>
</Box>
<Box flexGrow={1}>
<Text wrap="wrap" color="red">{text}</Text>
</Box>
</Box>
);
return (
<Box flexDirection="row">
<Box width={prefixWidth}>
<Text color="red">{prefix}</Text>
</Box>
<Box flexGrow={1}>
<Text wrap="wrap" color="red">
{text}
</Text>
</Box>
</Box>
);
};

export default ErrorMessage;
export default ErrorMessage;
|
|
|
@ -3,42 +3,42 @@ import { Text, Box } from 'ink';
|
|||
import { MarkdownRenderer } from '../../utils/MarkdownRenderer.js';
|
||||
|
||||
interface GeminiMessageProps {
|
||||
text: string;
|
||||
text: string;
|
||||
}
|
||||
|
||||
const GeminiMessage: React.FC<GeminiMessageProps> = ({ text }) => {
|
||||
const prefix = '✦ ';
|
||||
const prefixWidth = prefix.length;
|
||||
const prefix = '✦ ';
|
||||
const prefixWidth = prefix.length;
|
||||
|
||||
// Handle potentially null or undefined text gracefully
|
||||
const safeText = text || '';
|
||||
// Handle potentially null or undefined text gracefully
|
||||
const safeText = text || '';
|
||||
|
||||
// Use the static render method from the MarkdownRenderer class
|
||||
// Pass safeText which is guaranteed to be a string
|
||||
const renderedBlocks = MarkdownRenderer.render(safeText);
|
||||
|
||||
// If the original text was actually empty/null, render the minimal state
|
||||
if (!safeText && renderedBlocks.length === 0) {
|
||||
return (
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="blue">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}></Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
// Use the static render method from the MarkdownRenderer class
|
||||
// Pass safeText which is guaranteed to be a string
|
||||
const renderedBlocks = MarkdownRenderer.render(safeText);
|
||||
|
||||
// If the original text was actually empty/null, render the minimal state
|
||||
if (!safeText && renderedBlocks.length === 0) {
|
||||
return (
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="blue">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1} flexDirection="column">
|
||||
{renderedBlocks}
|
||||
</Box>
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="blue">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}></Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="blue">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1} flexDirection="column">
|
||||
{renderedBlocks}
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default GeminiMessage;
|
||||
export default GeminiMessage;
|
||||
|
|
|
@ -2,23 +2,25 @@ import React from 'react';
|
|||
import { Text, Box } from 'ink';
|
||||
|
||||
interface InfoMessageProps {
|
||||
text: string;
|
||||
text: string;
|
||||
}
|
||||
|
||||
const InfoMessage: React.FC<InfoMessageProps> = ({ text }) => {
|
||||
const prefix = 'ℹ ';
|
||||
const prefixWidth = prefix.length;
|
||||
const prefix = 'ℹ ';
|
||||
const prefixWidth = prefix.length;
|
||||
|
||||
return (
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="yellow">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}>
|
||||
<Text wrap="wrap" color="yellow">{text}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
return (
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="yellow">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}>
|
||||
<Text wrap="wrap" color="yellow">
|
||||
{text}
|
||||
</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default InfoMessage;
|
||||
export default InfoMessage;
|
||||
|
|
|
@ -1,7 +1,12 @@
|
|||
import React from 'react';
|
||||
import { Box, Text, useInput } from 'ink';
|
||||
import SelectInput from 'ink-select-input';
|
||||
import { ToolCallConfirmationDetails, ToolEditConfirmationDetails, ToolConfirmationOutcome, ToolExecuteConfirmationDetails } from '../../types.js'; // Adjust path as needed
|
||||
import {
|
||||
ToolCallConfirmationDetails,
|
||||
ToolEditConfirmationDetails,
|
||||
ToolConfirmationOutcome,
|
||||
ToolExecuteConfirmationDetails,
|
||||
} from '../../types.js'; // Adjust path as needed
|
||||
import { PartListUnion } from '@google/genai';
|
||||
import DiffRenderer from './DiffRenderer.js';
|
||||
import { UI_WIDTH } from '../../constants.js';
|
||||
|
@ -11,7 +16,9 @@ export interface ToolConfirmationMessageProps {
|
|||
onSubmit: (value: PartListUnion) => void;
|
||||
}
|
||||
|
||||
function isEditDetails(props: ToolCallConfirmationDetails): props is ToolEditConfirmationDetails {
|
||||
function isEditDetails(
|
||||
props: ToolCallConfirmationDetails,
|
||||
): props is ToolEditConfirmationDetails {
|
||||
return (props as ToolEditConfirmationDetails).fileName !== undefined;
|
||||
}
|
||||
|
||||
|
@ -20,7 +27,9 @@ interface InternalOption {
|
|||
value: ToolConfirmationOutcome;
|
||||
}
|
||||
|
||||
const ToolConfirmationMessage: React.FC<ToolConfirmationMessageProps> = ({ confirmationDetails }) => {
|
||||
const ToolConfirmationMessage: React.FC<ToolConfirmationMessageProps> = ({
|
||||
confirmationDetails,
|
||||
}) => {
|
||||
const { onConfirm } = confirmationDetails;
|
||||
|
||||
useInput((_, key) => {
|
||||
|
@ -39,41 +48,53 @@ const ToolConfirmationMessage: React.FC<ToolConfirmationMessageProps> = ({ confi
|
|||
const options: InternalOption[] = [];
|
||||
|
||||
if (isEditDetails(confirmationDetails)) {
|
||||
title = "Edit"; // Title for the outer box
|
||||
title = 'Edit'; // Title for the outer box
|
||||
|
||||
// Body content is now the DiffRenderer, passing filename to it
|
||||
// The bordered box is removed from here and handled within DiffRenderer
|
||||
bodyContent = (
|
||||
<DiffRenderer diffContent={confirmationDetails.fileDiff} />
|
||||
);
|
||||
bodyContent = <DiffRenderer diffContent={confirmationDetails.fileDiff} />;
|
||||
|
||||
question = `Apply this change?`;
|
||||
options.push(
|
||||
{ label: '1. Yes, apply change', value: ToolConfirmationOutcome.ProceedOnce },
|
||||
{ label: "2. Yes, always apply file edits", value: ToolConfirmationOutcome.ProceedAlways },
|
||||
{ label: '3. No (esc)', value: ToolConfirmationOutcome.Cancel }
|
||||
{
|
||||
label: '1. Yes, apply change',
|
||||
value: ToolConfirmationOutcome.ProceedOnce,
|
||||
},
|
||||
{
|
||||
label: '2. Yes, always apply file edits',
|
||||
value: ToolConfirmationOutcome.ProceedAlways,
|
||||
},
|
||||
{ label: '3. No (esc)', value: ToolConfirmationOutcome.Cancel },
|
||||
);
|
||||
|
||||
} else {
|
||||
const executionProps = confirmationDetails as ToolExecuteConfirmationDetails;
|
||||
title = "Execute Command"; // Title for the outer box
|
||||
const executionProps =
|
||||
confirmationDetails as ToolExecuteConfirmationDetails;
|
||||
title = 'Execute Command'; // Title for the outer box
|
||||
|
||||
// For execution, we still need context display and description
|
||||
const commandDisplay = <Text color="cyan">{executionProps.command}</Text>;
|
||||
|
||||
// Combine command and description into bodyContent for layout consistency
|
||||
bodyContent = (
|
||||
<Box flexDirection="column">
|
||||
<Box paddingX={1} marginLeft={1}>{commandDisplay}</Box>
|
||||
<Box flexDirection="column">
|
||||
<Box paddingX={1} marginLeft={1}>
|
||||
{commandDisplay}
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
|
||||
question = `Allow execution?`;
|
||||
const alwaysLabel = `2. Yes, always allow '${executionProps.rootCommand}' commands`;
|
||||
options.push(
|
||||
{ label: '1. Yes, allow once', value: ToolConfirmationOutcome.ProceedOnce },
|
||||
{ label: alwaysLabel, value: ToolConfirmationOutcome.ProceedAlways },
|
||||
{ label: '3. No (esc)', value: ToolConfirmationOutcome.Cancel }
|
||||
{
|
||||
label: '1. Yes, allow once',
|
||||
value: ToolConfirmationOutcome.ProceedOnce,
|
||||
},
|
||||
{
|
||||
label: alwaysLabel,
|
||||
value: ToolConfirmationOutcome.ProceedAlways,
|
||||
},
|
||||
{ label: '3. No (esc)', value: ToolConfirmationOutcome.Cancel },
|
||||
);
|
||||
}
|
||||
|
||||
|
@@ -82,7 +103,7 @@ const ToolConfirmationMessage: React.FC<ToolConfirmationMessageProps> = ({ confi
|
|||
{/* Body Content (Diff Renderer or Command Info) */}
|
||||
{/* No separate context display here anymore for edits */}
|
||||
<Box flexGrow={1} flexShrink={1} overflow="hidden" marginBottom={1}>
|
||||
{bodyContent}
|
||||
{bodyContent}
|
||||
</Box>
|
||||
|
||||
{/* Confirmation Question */}
|
||||
|
@@ -98,4 +119,4 @@ const ToolConfirmationMessage: React.FC<ToolConfirmationMessageProps> = ({ confi
|
|||
);
|
||||
};
|
||||
|
||||
export default ToolConfirmationMessage;
|
||||
export default ToolConfirmationMessage;
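The component above branches on a user-defined type guard (`isEditDetails`) rather than a discriminant field. A minimal sketch of the same narrowing pattern, using simplified stand-in interfaces rather than the project's real `ToolCallConfirmationDetails` types:

```ts
// Simplified stand-in shapes; not the project's actual confirmation interfaces.
interface BaseDetails {
  title: string;
}
interface EditDetails extends BaseDetails {
  fileName: string;
  fileDiff: string;
}
interface ExecuteDetails extends BaseDetails {
  command: string;
}

// Checking for a property unique to one branch lets TypeScript narrow the union.
function isEditDetails(d: BaseDetails): d is EditDetails {
  return (d as EditDetails).fileName !== undefined;
}

function describe(d: EditDetails | ExecuteDetails): string {
  return isEditDetails(d) ? `edit ${d.fileName}` : `run ${d.command}`;
}
```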
@@ -6,42 +6,45 @@ import { PartListUnion } from '@google/genai';
|
|||
import ToolConfirmationMessage from './ToolConfirmationMessage.js';
|
||||
|
||||
interface ToolGroupMessageProps {
|
||||
toolCalls: IndividualToolCallDisplay[];
|
||||
onSubmit: (value: PartListUnion) => void;
|
||||
toolCalls: IndividualToolCallDisplay[];
|
||||
onSubmit: (value: PartListUnion) => void;
|
||||
}
|
||||
|
||||
// Main component renders the border and maps the tools using ToolMessage
|
||||
const ToolGroupMessage: React.FC<ToolGroupMessageProps> = ({ toolCalls, onSubmit }) => {
|
||||
const hasPending = toolCalls.some(t => t.status === ToolCallStatus.Pending);
|
||||
const borderColor = hasPending ? "yellow" : "blue";
|
||||
const ToolGroupMessage: React.FC<ToolGroupMessageProps> = ({
|
||||
toolCalls,
|
||||
onSubmit,
|
||||
}) => {
|
||||
const hasPending = toolCalls.some((t) => t.status === ToolCallStatus.Pending);
|
||||
const borderColor = hasPending ? 'yellow' : 'blue';
|
||||
|
||||
return (
|
||||
<Box
|
||||
flexDirection="column"
|
||||
borderStyle="round"
|
||||
borderColor={borderColor}
|
||||
>
|
||||
{toolCalls.map((tool) => {
|
||||
return (
|
||||
<React.Fragment key={tool.callId}>
|
||||
<ToolMessage
|
||||
key={tool.callId} // Use callId as the key
|
||||
name={tool.name}
|
||||
description={tool.description}
|
||||
resultDisplay={tool.resultDisplay}
|
||||
status={tool.status}
|
||||
/>
|
||||
{tool.status === ToolCallStatus.Confirming && tool.confirmationDetails && (
|
||||
<ToolConfirmationMessage confirmationDetails={tool.confirmationDetails} onSubmit={onSubmit}></ToolConfirmationMessage>
|
||||
)}
|
||||
</React.Fragment>
|
||||
);
|
||||
})}
|
||||
{/* Optional: Add padding below the last item if needed,
|
||||
return (
|
||||
<Box flexDirection="column" borderStyle="round" borderColor={borderColor}>
|
||||
{toolCalls.map((tool) => {
|
||||
return (
|
||||
<React.Fragment key={tool.callId}>
|
||||
<ToolMessage
|
||||
key={tool.callId} // Use callId as the key
|
||||
name={tool.name}
|
||||
description={tool.description}
|
||||
resultDisplay={tool.resultDisplay}
|
||||
status={tool.status}
|
||||
/>
|
||||
{tool.status === ToolCallStatus.Confirming &&
|
||||
tool.confirmationDetails && (
|
||||
<ToolConfirmationMessage
|
||||
confirmationDetails={tool.confirmationDetails}
|
||||
onSubmit={onSubmit}
|
||||
></ToolConfirmationMessage>
|
||||
)}
|
||||
</React.Fragment>
|
||||
);
|
||||
})}
|
||||
{/* Optional: Add padding below the last item if needed,
|
||||
though ToolMessage already has some vertical space implicitly */}
|
||||
{/* {tools.length > 0 && <Box height={1} />} */}
|
||||
</Box>
|
||||
);
|
||||
{/* {tools.length > 0 && <Box height={1} />} */}
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default ToolGroupMessage;
@@ -7,47 +7,68 @@ import DiffRenderer from './DiffRenderer.js';
|
|||
import { MarkdownRenderer } from '../../utils/MarkdownRenderer.js';
|
||||
|
||||
interface ToolMessageProps {
|
||||
name: string;
|
||||
description: string;
|
||||
resultDisplay: ToolResultDisplay | undefined;
|
||||
status: ToolCallStatus;
|
||||
name: string;
|
||||
description: string;
|
||||
resultDisplay: ToolResultDisplay | undefined;
|
||||
status: ToolCallStatus;
|
||||
}
|
||||
|
||||
const ToolMessage: React.FC<ToolMessageProps> = ({ name, description, resultDisplay, status }) => {
|
||||
const statusIndicatorWidth = 3;
|
||||
const hasResult = (status === ToolCallStatus.Invoked || status === ToolCallStatus.Canceled) && resultDisplay && resultDisplay.toString().trim().length > 0;
|
||||
const ToolMessage: React.FC<ToolMessageProps> = ({
|
||||
name,
|
||||
description,
|
||||
resultDisplay,
|
||||
status,
|
||||
}) => {
|
||||
const statusIndicatorWidth = 3;
|
||||
const hasResult =
|
||||
(status === ToolCallStatus.Invoked || status === ToolCallStatus.Canceled) &&
|
||||
resultDisplay &&
|
||||
resultDisplay.toString().trim().length > 0;
|
||||
|
||||
return (
|
||||
<Box paddingX={1} paddingY={0} flexDirection="column">
|
||||
{/* Row for Status Indicator and Tool Info */}
|
||||
<Box minHeight={1}>
|
||||
{/* Status Indicator */}
|
||||
<Box minWidth={statusIndicatorWidth}>
|
||||
{status === ToolCallStatus.Pending && <Spinner type="dots" />}
|
||||
{status === ToolCallStatus.Invoked && <Text color="green">✔</Text>}
|
||||
{status === ToolCallStatus.Confirming && <Text color="blue">?</Text>}
|
||||
{status === ToolCallStatus.Canceled && <Text color="red" bold>-</Text>}
|
||||
|
||||
</Box>
|
||||
<Box>
|
||||
<Text color="blue" wrap="truncate-end" strikethrough={status === ToolCallStatus.Canceled}>
|
||||
<Text bold>{name}</Text> <Text color="gray">{description}</Text>
|
||||
</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
|
||||
{hasResult && (
|
||||
<Box paddingLeft={statusIndicatorWidth}>
|
||||
<Box flexShrink={1} flexDirection="row">
|
||||
<Text color="gray">↳ </Text>
|
||||
{/* Use default text color (white) or gray instead of dimColor */}
|
||||
{typeof resultDisplay === 'string' && <Box flexDirection='column'>{MarkdownRenderer.render(resultDisplay)}</Box>}
|
||||
{typeof resultDisplay === 'object' && <DiffRenderer diffContent={resultDisplay.fileDiff} />}
|
||||
</Box>
|
||||
</Box>
|
||||
)}
|
||||
return (
|
||||
<Box paddingX={1} paddingY={0} flexDirection="column">
|
||||
{/* Row for Status Indicator and Tool Info */}
|
||||
<Box minHeight={1}>
|
||||
{/* Status Indicator */}
|
||||
<Box minWidth={statusIndicatorWidth}>
|
||||
{status === ToolCallStatus.Pending && <Spinner type="dots" />}
|
||||
{status === ToolCallStatus.Invoked && <Text color="green">✔</Text>}
|
||||
{status === ToolCallStatus.Confirming && <Text color="blue">?</Text>}
|
||||
{status === ToolCallStatus.Canceled && (
|
||||
<Text color="red" bold>
|
||||
-
|
||||
</Text>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
<Box>
|
||||
<Text
|
||||
color="blue"
|
||||
wrap="truncate-end"
|
||||
strikethrough={status === ToolCallStatus.Canceled}
|
||||
>
|
||||
<Text bold>{name}</Text> <Text color="gray">{description}</Text>
|
||||
</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
|
||||
{hasResult && (
|
||||
<Box paddingLeft={statusIndicatorWidth}>
|
||||
<Box flexShrink={1} flexDirection="row">
|
||||
<Text color="gray">↳ </Text>
|
||||
{/* Use default text color (white) or gray instead of dimColor */}
|
||||
{typeof resultDisplay === 'string' && (
|
||||
<Box flexDirection="column">
|
||||
{MarkdownRenderer.render(resultDisplay)}
|
||||
</Box>
|
||||
)}
|
||||
{typeof resultDisplay === 'object' && (
|
||||
<DiffRenderer diffContent={resultDisplay.fileDiff} />
|
||||
)}
|
||||
</Box>
|
||||
</Box>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default ToolMessage;
@@ -2,23 +2,23 @@ import React from 'react';
|
|||
import { Text, Box } from 'ink';
|
||||
|
||||
interface UserMessageProps {
|
||||
text: string;
|
||||
text: string;
|
||||
}
|
||||
|
||||
const UserMessage: React.FC<UserMessageProps> = ({ text }) => {
|
||||
const prefix = '> ';
|
||||
const prefixWidth = prefix.length;
|
||||
const prefix = '> ';
|
||||
const prefixWidth = prefix.length;
|
||||
|
||||
return (
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="gray">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}>
|
||||
<Text wrap="wrap">{text}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
return (
|
||||
<Box flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text color="gray">{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}>
|
||||
<Text wrap="wrap">{text}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default UserMessage;
|
||||
export default UserMessage;
@@ -3,24 +3,25 @@ const BoxBorderWidth = 1;
|
|||
export const BOX_PADDING_X = 1;
|
||||
|
||||
// Calculate width based on art, padding, and border
|
||||
export const UI_WIDTH = EstimatedArtWidth + (BOX_PADDING_X * 2) + (BoxBorderWidth * 2); // ~63
|
||||
export const UI_WIDTH =
|
||||
EstimatedArtWidth + BOX_PADDING_X * 2 + BoxBorderWidth * 2; // ~63
|
||||
|
||||
export const WITTY_LOADING_PHRASES = [
|
||||
'Consulting the digital spirits...',
|
||||
'Reticulating splines...',
|
||||
'Warming up the AI hamsters...',
|
||||
'Asking the magic conch shell...',
|
||||
'Generating witty retort...',
|
||||
'Polishing the algorithms...',
|
||||
'Don\'t rush perfection (or my code)...',
|
||||
'Brewing fresh bytes...',
|
||||
'Counting electrons...',
|
||||
'Engaging cognitive processors...',
|
||||
'Checking for syntax errors in the universe...',
|
||||
'One moment, optimizing humor...',
|
||||
'Shuffling punchlines...',
|
||||
'Untangling neural nets...',
|
||||
'Compiling brilliance...',
|
||||
'Consulting the digital spirits...',
|
||||
'Reticulating splines...',
|
||||
'Warming up the AI hamsters...',
|
||||
'Asking the magic conch shell...',
|
||||
'Generating witty retort...',
|
||||
'Polishing the algorithms...',
|
||||
"Don't rush perfection (or my code)...",
|
||||
'Brewing fresh bytes...',
|
||||
'Counting electrons...',
|
||||
'Engaging cognitive processors...',
|
||||
'Checking for syntax errors in the universe...',
|
||||
'One moment, optimizing humor...',
|
||||
'Shuffling punchlines...',
|
||||
'Untangling neural nets...',
|
||||
'Compiling brilliance...',
|
||||
];
|
||||
export const PHRASE_CHANGE_INTERVAL_MS = 15000;
|
||||
export const STREAM_DEBOUNCE_MS = 100;
|
||||
export const STREAM_DEBOUNCE_MS = 100;
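The only behavioral line in this hunk is the `UI_WIDTH` arithmetic; Prettier just drops the redundant parentheses, since `*` already binds tighter than `+`. A quick check of the `~63` comment, assuming a hypothetical value for `EstimatedArtWidth` (its real definition sits above this hunk and is not visible here):

```ts
// Illustrative only: EstimatedArtWidth is defined earlier in constants.ts and is not
// shown in this diff; 59 is assumed here purely to make the "~63" comment concrete.
const EstimatedArtWidth = 59; // assumption
const BOX_PADDING_X = 1;
const BoxBorderWidth = 1;
const UI_WIDTH = EstimatedArtWidth + BOX_PADDING_X * 2 + BoxBorderWidth * 2;
console.log(UI_WIDTH); // 59 + 2 + 2 = 63
```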
@@ -7,136 +7,157 @@ import { processGeminiStream } from '../../core/gemini-stream.js';
|
|||
import { StreamingState } from '../../core/gemini-stream.js';
|
||||
|
||||
const addHistoryItem = (
|
||||
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
|
||||
itemData: Omit<HistoryItem, 'id'>,
|
||||
id: number
|
||||
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
|
||||
itemData: Omit<HistoryItem, 'id'>,
|
||||
id: number,
|
||||
) => {
|
||||
setHistory((prevHistory) => [
|
||||
...prevHistory,
|
||||
{ ...itemData, id } as HistoryItem,
|
||||
]);
|
||||
setHistory((prevHistory) => [
|
||||
...prevHistory,
|
||||
{ ...itemData, id } as HistoryItem,
|
||||
]);
|
||||
};
|
||||
|
||||
export const useGeminiStream = (
|
||||
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
|
||||
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
|
||||
) => {
|
||||
const [streamingState, setStreamingState] = useState<StreamingState>(StreamingState.Idle);
|
||||
const [initError, setInitError] = useState<string | null>(null);
|
||||
const abortControllerRef = useRef<AbortController | null>(null);
|
||||
const currentToolGroupIdRef = useRef<number | null>(null);
|
||||
const chatSessionRef = useRef<Chat | null>(null);
|
||||
const geminiClientRef = useRef<GeminiClient | null>(null);
|
||||
const messageIdCounterRef = useRef(0);
|
||||
const [streamingState, setStreamingState] = useState<StreamingState>(
|
||||
StreamingState.Idle,
|
||||
);
|
||||
const [initError, setInitError] = useState<string | null>(null);
|
||||
const abortControllerRef = useRef<AbortController | null>(null);
|
||||
const currentToolGroupIdRef = useRef<number | null>(null);
|
||||
const chatSessionRef = useRef<Chat | null>(null);
|
||||
const geminiClientRef = useRef<GeminiClient | null>(null);
|
||||
const messageIdCounterRef = useRef(0);
|
||||
|
||||
// Initialize Client Effect (remains the same)
|
||||
useEffect(() => {
|
||||
setInitError(null);
|
||||
if (!geminiClientRef.current) {
|
||||
try {
|
||||
geminiClientRef.current = new GeminiClient();
|
||||
} catch (error: any) {
|
||||
setInitError(`Failed to initialize client: ${error.message || 'Unknown error'}`);
|
||||
}
|
||||
}
|
||||
}, []);
|
||||
// Initialize Client Effect (remains the same)
|
||||
useEffect(() => {
|
||||
setInitError(null);
|
||||
if (!geminiClientRef.current) {
|
||||
try {
|
||||
geminiClientRef.current = new GeminiClient();
|
||||
} catch (error: any) {
|
||||
setInitError(
|
||||
`Failed to initialize client: ${error.message || 'Unknown error'}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Input Handling Effect (remains the same)
|
||||
useInput((input, key) => {
|
||||
if (streamingState === StreamingState.Responding && key.escape) {
|
||||
abortControllerRef.current?.abort();
|
||||
}
|
||||
});
|
||||
// Input Handling Effect (remains the same)
|
||||
useInput((input, key) => {
|
||||
if (streamingState === StreamingState.Responding && key.escape) {
|
||||
abortControllerRef.current?.abort();
|
||||
}
|
||||
});
|
||||
|
||||
// ID Generation Callback (remains the same)
|
||||
const getNextMessageId = useCallback((baseTimestamp: number): number => {
|
||||
messageIdCounterRef.current += 1;
|
||||
return baseTimestamp + messageIdCounterRef.current;
|
||||
}, []);
|
||||
// ID Generation Callback (remains the same)
|
||||
const getNextMessageId = useCallback((baseTimestamp: number): number => {
|
||||
messageIdCounterRef.current += 1;
|
||||
return baseTimestamp + messageIdCounterRef.current;
|
||||
}, []);
|
||||
|
||||
// Submit Query Callback (updated to call processGeminiStream)
|
||||
const submitQuery = useCallback(async (query: PartListUnion) => {
|
||||
if (streamingState === StreamingState.Responding) {
|
||||
// No-op if already going.
|
||||
return;
|
||||
// Submit Query Callback (updated to call processGeminiStream)
|
||||
const submitQuery = useCallback(
|
||||
async (query: PartListUnion) => {
|
||||
if (streamingState === StreamingState.Responding) {
|
||||
// No-op if already going.
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof query === 'string' && query.toString().trim().length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userMessageTimestamp = Date.now();
|
||||
const client = geminiClientRef.current;
|
||||
if (!client) {
|
||||
setInitError('Gemini client is not available.');
|
||||
return;
|
||||
}
|
||||
|
||||
if (!chatSessionRef.current) {
|
||||
chatSessionRef.current = await client.startChat();
|
||||
}
|
||||
|
||||
// Reset state
|
||||
setStreamingState(StreamingState.Responding);
|
||||
setInitError(null);
|
||||
currentToolGroupIdRef.current = null;
|
||||
messageIdCounterRef.current = 0;
|
||||
const chat = chatSessionRef.current;
|
||||
|
||||
try {
|
||||
// Add user message
|
||||
if (typeof query === 'string') {
|
||||
const trimmedQuery = query.toString();
|
||||
addHistoryItem(
|
||||
setHistory,
|
||||
{ type: 'user', text: trimmedQuery },
|
||||
userMessageTimestamp,
|
||||
);
|
||||
} else if (
|
||||
// HACK to detect errored function responses.
|
||||
typeof query === 'object' &&
|
||||
query !== null &&
|
||||
!Array.isArray(query) && // Ensure it's a single Part object
|
||||
'functionResponse' in query && // Check if it's a function response Part
|
||||
query.functionResponse?.response && // Check if response object exists
|
||||
'error' in query.functionResponse.response // Check specifically for the 'error' key
|
||||
) {
|
||||
const history = chat.getHistory();
|
||||
history.push({ role: 'user', parts: [query] });
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof query === 'string' && query.toString().trim().length === 0) {
|
||||
return;
|
||||
// Prepare for streaming
|
||||
abortControllerRef.current = new AbortController();
|
||||
const signal = abortControllerRef.current.signal;
|
||||
|
||||
// --- Delegate to Stream Processor ---
|
||||
|
||||
const stream = client.sendMessageStream(chat, query, signal);
|
||||
|
||||
const addHistoryItemFromStream = (
|
||||
itemData: Omit<HistoryItem, 'id'>,
|
||||
id: number,
|
||||
) => {
|
||||
addHistoryItem(setHistory, itemData, id);
|
||||
};
|
||||
const getStreamMessageId = () => getNextMessageId(userMessageTimestamp);
|
||||
|
||||
// Call the renamed processor function
|
||||
await processGeminiStream({
|
||||
stream,
|
||||
signal,
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId: getStreamMessageId,
|
||||
addHistoryItem: addHistoryItemFromStream,
|
||||
currentToolGroupIdRef,
|
||||
});
|
||||
} catch (error: any) {
|
||||
// (Error handling for stream initiation remains the same)
|
||||
console.error('Error initiating stream:', error);
|
||||
if (error.name !== 'AbortError') {
|
||||
// Use historyUpdater's function potentially? Or keep addHistoryItem here?
|
||||
// Keeping addHistoryItem here for direct errors from this scope.
|
||||
addHistoryItem(
|
||||
setHistory,
|
||||
{
|
||||
type: 'error',
|
||||
text: `[Error starting stream: ${error.message}]`,
|
||||
},
|
||||
getNextMessageId(userMessageTimestamp),
|
||||
);
|
||||
}
|
||||
} finally {
|
||||
abortControllerRef.current = null;
|
||||
setStreamingState(StreamingState.Idle);
|
||||
}
|
||||
},
|
||||
[setStreamingState, setHistory, initError, getNextMessageId],
|
||||
);
|
||||
|
||||
const userMessageTimestamp = Date.now();
|
||||
const client = geminiClientRef.current;
|
||||
if (!client) {
|
||||
setInitError("Gemini client is not available.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!chatSessionRef.current) {
|
||||
chatSessionRef.current = await client.startChat();
|
||||
}
|
||||
|
||||
// Reset state
|
||||
setStreamingState(StreamingState.Responding);
|
||||
setInitError(null);
|
||||
currentToolGroupIdRef.current = null;
|
||||
messageIdCounterRef.current = 0;
|
||||
const chat = chatSessionRef.current;
|
||||
|
||||
try {
|
||||
// Add user message
|
||||
if (typeof query === 'string') {
|
||||
const trimmedQuery = query.toString();
|
||||
addHistoryItem(setHistory, { type: 'user', text: trimmedQuery }, userMessageTimestamp);
|
||||
} else if (
|
||||
// HACK to detect errored function responses.
|
||||
typeof query === 'object' &&
|
||||
query !== null &&
|
||||
!Array.isArray(query) && // Ensure it's a single Part object
|
||||
'functionResponse' in query && // Check if it's a function response Part
|
||||
query.functionResponse?.response && // Check if response object exists
|
||||
'error' in query.functionResponse.response // Check specifically for the 'error' key
|
||||
) {
|
||||
const history = chat.getHistory();
|
||||
history.push({ role: 'user', parts: [query] });
|
||||
return;
|
||||
}
|
||||
|
||||
// Prepare for streaming
|
||||
abortControllerRef.current = new AbortController();
|
||||
const signal = abortControllerRef.current.signal;
|
||||
|
||||
// --- Delegate to Stream Processor ---
|
||||
|
||||
const stream = client.sendMessageStream(chat, query, signal);
|
||||
|
||||
const addHistoryItemFromStream = (itemData: Omit<HistoryItem, 'id'>, id: number) => {
|
||||
addHistoryItem(setHistory, itemData, id);
|
||||
};
|
||||
const getStreamMessageId = () => getNextMessageId(userMessageTimestamp);
|
||||
|
||||
// Call the renamed processor function
|
||||
await processGeminiStream({
|
||||
stream,
|
||||
signal,
|
||||
setHistory,
|
||||
submitQuery,
|
||||
getNextMessageId: getStreamMessageId,
|
||||
addHistoryItem: addHistoryItemFromStream,
|
||||
currentToolGroupIdRef,
|
||||
});
|
||||
} catch (error: any) {
|
||||
// (Error handling for stream initiation remains the same)
|
||||
console.error("Error initiating stream:", error);
|
||||
if (error.name !== 'AbortError') {
|
||||
// Use historyUpdater's function potentially? Or keep addHistoryItem here?
|
||||
// Keeping addHistoryItem here for direct errors from this scope.
|
||||
addHistoryItem(setHistory, { type: 'error', text: `[Error starting stream: ${error.message}]` }, getNextMessageId(userMessageTimestamp));
|
||||
}
|
||||
} finally {
|
||||
abortControllerRef.current = null;
|
||||
setStreamingState(StreamingState.Idle);
|
||||
}
|
||||
}, [setStreamingState, setHistory, initError, getNextMessageId]);
|
||||
|
||||
return { streamingState, submitQuery, initError };
|
||||
return { streamingState, submitQuery, initError };
|
||||
};
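Stripped of UI state, the cancellation logic in `submitQuery` reduces to one `AbortController` per request, an Escape handler that calls `abort()`, and cleanup in `finally`. A self-contained sketch of that pattern; `startStream` here is a stand-in, not the real Gemini client API:

```ts
// Minimal sketch of the cancellation pattern used above: one AbortController per request,
// a key handler calls abort(), and the controller is dropped once the request settles.
function runCancellable(startStream: (signal: AbortSignal) => Promise<void>) {
  const controller = new AbortController();

  startStream(controller.signal)
    .catch((err: unknown) => {
      if ((err as Error)?.name !== 'AbortError') {
        // Real errors are reported; user cancellation is silently ignored.
        console.error('stream failed:', err);
      }
    })
    .finally(() => {
      // Equivalent of: abortControllerRef.current = null; setStreamingState(Idle)
    });

  // Wire this to a key handler, e.g. Ink's useInput on Escape.
  return { cancel: () => controller.abort() };
}
```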
@@ -1,53 +1,61 @@
|
|||
import { useState, useEffect, useRef } from 'react';
|
||||
import { WITTY_LOADING_PHRASES, PHRASE_CHANGE_INTERVAL_MS } from '../constants.js';
|
||||
import {
|
||||
WITTY_LOADING_PHRASES,
|
||||
PHRASE_CHANGE_INTERVAL_MS,
|
||||
} from '../constants.js';
|
||||
import { StreamingState } from '../../core/gemini-stream.js';
|
||||
|
||||
export const useLoadingIndicator = (streamingState: StreamingState) => {
|
||||
const [elapsedTime, setElapsedTime] = useState(0);
|
||||
const [currentLoadingPhrase, setCurrentLoadingPhrase] = useState(WITTY_LOADING_PHRASES[0]);
|
||||
const timerRef = useRef<NodeJS.Timeout | null>(null);
|
||||
const phraseIntervalRef = useRef<NodeJS.Timeout | null>(null);
|
||||
const currentPhraseIndexRef = useRef<number>(0);
|
||||
const [elapsedTime, setElapsedTime] = useState(0);
|
||||
const [currentLoadingPhrase, setCurrentLoadingPhrase] = useState(
|
||||
WITTY_LOADING_PHRASES[0],
|
||||
);
|
||||
const timerRef = useRef<NodeJS.Timeout | null>(null);
|
||||
const phraseIntervalRef = useRef<NodeJS.Timeout | null>(null);
|
||||
const currentPhraseIndexRef = useRef<number>(0);
|
||||
|
||||
// Timer effect for elapsed time during loading
|
||||
useEffect(() => {
|
||||
if (streamingState === StreamingState.Responding) {
|
||||
setElapsedTime(0); // Reset timer on new loading start
|
||||
timerRef.current = setInterval(() => {
|
||||
setElapsedTime((prevTime) => prevTime + 1);
|
||||
}, 1000);
|
||||
} else if (timerRef.current) {
|
||||
clearInterval(timerRef.current);
|
||||
timerRef.current = null;
|
||||
}
|
||||
// Cleanup on unmount or when isLoading changes
|
||||
return () => {
|
||||
if (timerRef.current) {
|
||||
clearInterval(timerRef.current);
|
||||
}
|
||||
};
|
||||
}, [streamingState]);
|
||||
// Timer effect for elapsed time during loading
|
||||
useEffect(() => {
|
||||
if (streamingState === StreamingState.Responding) {
|
||||
setElapsedTime(0); // Reset timer on new loading start
|
||||
timerRef.current = setInterval(() => {
|
||||
setElapsedTime((prevTime) => prevTime + 1);
|
||||
}, 1000);
|
||||
} else if (timerRef.current) {
|
||||
clearInterval(timerRef.current);
|
||||
timerRef.current = null;
|
||||
}
|
||||
// Cleanup on unmount or when isLoading changes
|
||||
return () => {
|
||||
if (timerRef.current) {
|
||||
clearInterval(timerRef.current);
|
||||
}
|
||||
};
|
||||
}, [streamingState]);
|
||||
|
||||
// Effect for cycling through witty loading phrases
|
||||
useEffect(() => {
|
||||
if (streamingState === StreamingState.Responding) {
|
||||
currentPhraseIndexRef.current = 0;
|
||||
setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[0]);
|
||||
phraseIntervalRef.current = setInterval(() => {
|
||||
currentPhraseIndexRef.current = (currentPhraseIndexRef.current + 1) % WITTY_LOADING_PHRASES.length;
|
||||
setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[currentPhraseIndexRef.current]);
|
||||
}, PHRASE_CHANGE_INTERVAL_MS);
|
||||
} else if (phraseIntervalRef.current) {
|
||||
clearInterval(phraseIntervalRef.current);
|
||||
phraseIntervalRef.current = null;
|
||||
}
|
||||
// Cleanup on unmount or when isLoading changes
|
||||
return () => {
|
||||
if (phraseIntervalRef.current) {
|
||||
clearInterval(phraseIntervalRef.current);
|
||||
}
|
||||
};
|
||||
}, [streamingState]);
|
||||
// Effect for cycling through witty loading phrases
|
||||
useEffect(() => {
|
||||
if (streamingState === StreamingState.Responding) {
|
||||
currentPhraseIndexRef.current = 0;
|
||||
setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[0]);
|
||||
phraseIntervalRef.current = setInterval(() => {
|
||||
currentPhraseIndexRef.current =
|
||||
(currentPhraseIndexRef.current + 1) % WITTY_LOADING_PHRASES.length;
|
||||
setCurrentLoadingPhrase(
|
||||
WITTY_LOADING_PHRASES[currentPhraseIndexRef.current],
|
||||
);
|
||||
}, PHRASE_CHANGE_INTERVAL_MS);
|
||||
} else if (phraseIntervalRef.current) {
|
||||
clearInterval(phraseIntervalRef.current);
|
||||
phraseIntervalRef.current = null;
|
||||
}
|
||||
// Cleanup on unmount or when isLoading changes
|
||||
return () => {
|
||||
if (phraseIntervalRef.current) {
|
||||
clearInterval(phraseIntervalRef.current);
|
||||
}
|
||||
};
|
||||
}, [streamingState]);
|
||||
|
||||
return { elapsedTime, currentLoadingPhrase };
|
||||
};
|
||||
return { elapsedTime, currentLoadingPhrase };
|
||||
};
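A hypothetical consumer of this hook, only to show how `elapsedTime` and `currentLoadingPhrase` are meant to be read; the component name and import paths are illustrative, not part of this change:

```tsx
import React from 'react';
import { Text } from 'ink';
import { StreamingState } from '../../core/gemini-stream.js'; // path assumed
import { useLoadingIndicator } from './useLoadingIndicator.js'; // path assumed

const LoadingFooter: React.FC<{ streamingState: StreamingState }> = ({
  streamingState,
}) => {
  const { elapsedTime, currentLoadingPhrase } = useLoadingIndicator(streamingState);
  // Only show the witty phrase and timer while a response is streaming.
  if (streamingState !== StreamingState.Responding) return null;
  return (
    <Text color="gray">
      {currentLoadingPhrase} ({elapsedTime}s)
    </Text>
  );
};

export default LoadingFooter;
```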
@@ -1,62 +1,65 @@
|
|||
import { ToolResultDisplay } from "../tools/tools.js";
|
||||
import { ToolResultDisplay } from '../tools/tools.js';
|
||||
|
||||
export enum ToolCallStatus {
|
||||
Pending,
|
||||
Invoked,
|
||||
Confirming,
|
||||
Canceled,
|
||||
Pending,
|
||||
Invoked,
|
||||
Confirming,
|
||||
Canceled,
|
||||
}
|
||||
|
||||
export interface ToolCallEvent {
|
||||
type: 'tool_call';
|
||||
status: ToolCallStatus;
|
||||
callId: string;
|
||||
name: string;
|
||||
args: Record<string, any>;
|
||||
resultDisplay: ToolResultDisplay | undefined;
|
||||
confirmationDetails: ToolCallConfirmationDetails | undefined;
|
||||
type: 'tool_call';
|
||||
status: ToolCallStatus;
|
||||
callId: string;
|
||||
name: string;
|
||||
args: Record<string, any>;
|
||||
resultDisplay: ToolResultDisplay | undefined;
|
||||
confirmationDetails: ToolCallConfirmationDetails | undefined;
|
||||
}
|
||||
|
||||
export interface IndividualToolCallDisplay {
|
||||
callId: string;
|
||||
name: string;
|
||||
description: string;
|
||||
resultDisplay: ToolResultDisplay | undefined;
|
||||
status: ToolCallStatus;
|
||||
confirmationDetails: ToolCallConfirmationDetails | undefined;
|
||||
callId: string;
|
||||
name: string;
|
||||
description: string;
|
||||
resultDisplay: ToolResultDisplay | undefined;
|
||||
status: ToolCallStatus;
|
||||
confirmationDetails: ToolCallConfirmationDetails | undefined;
|
||||
}
|
||||
|
||||
export interface HistoryItemBase {
|
||||
id: number;
|
||||
text?: string; // Text content for user/gemini/info/error messages
|
||||
id: number;
|
||||
text?: string; // Text content for user/gemini/info/error messages
|
||||
}
|
||||
|
||||
export type HistoryItem = HistoryItemBase & (
|
||||
export type HistoryItem = HistoryItemBase &
|
||||
(
|
||||
| { type: 'user'; text: string }
|
||||
| { type: 'gemini'; text: string }
|
||||
| { type: 'info'; text: string }
|
||||
| { type: 'error'; text: string }
|
||||
| { type: 'tool_group'; tools: IndividualToolCallDisplay[]; }
|
||||
);
|
||||
| { type: 'tool_group'; tools: IndividualToolCallDisplay[] }
|
||||
);
|
||||
|
||||
export interface ToolCallConfirmationDetails {
|
||||
title: string;
|
||||
onConfirm: (outcome: ToolConfirmationOutcome) => Promise<void>;
|
||||
title: string;
|
||||
onConfirm: (outcome: ToolConfirmationOutcome) => Promise<void>;
|
||||
}
|
||||
|
||||
export interface ToolEditConfirmationDetails extends ToolCallConfirmationDetails {
|
||||
fileName: string;
|
||||
fileDiff: string;
|
||||
export interface ToolEditConfirmationDetails
|
||||
extends ToolCallConfirmationDetails {
|
||||
fileName: string;
|
||||
fileDiff: string;
|
||||
}
|
||||
|
||||
export interface ToolExecuteConfirmationDetails extends ToolCallConfirmationDetails {
|
||||
command: string;
|
||||
rootCommand: string;
|
||||
description: string;
|
||||
export interface ToolExecuteConfirmationDetails
|
||||
extends ToolCallConfirmationDetails {
|
||||
command: string;
|
||||
rootCommand: string;
|
||||
description: string;
|
||||
}
|
||||
|
||||
export enum ToolConfirmationOutcome {
|
||||
ProceedOnce,
|
||||
ProceedAlways,
|
||||
Cancel,
|
||||
}
|
||||
ProceedOnce,
|
||||
ProceedAlways,
|
||||
Cancel,
|
||||
}
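Because `HistoryItem` is a discriminated union on `type`, consumers can switch on that field and get the branch-specific properties for free. An illustrative helper (the import path is an assumption):

```ts
import { HistoryItem } from './types.js'; // path assumed

// Sketch of how the union above narrows on its `type` discriminant.
function summarize(item: HistoryItem): string {
  switch (item.type) {
    case 'tool_group':
      return `${item.tools.length} tool call(s)`; // `tools` exists only on this branch
    case 'user':
    case 'gemini':
    case 'info':
    case 'error':
      return item.text; // every other branch carries a required `text`
  }
}
```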
@@ -7,243 +7,356 @@ import { Text, Box } from 'ink';
|
|||
* and inline styles (bold, italic, strikethrough, code, links).
|
||||
*/
|
||||
export class MarkdownRenderer {
|
||||
/**
|
||||
* Renders INLINE markdown elements using an iterative approach.
|
||||
* Supports: **bold**, *italic*, _italic_, ~~strike~~, [link](url), `code`, ``code``, <u>underline</u>
|
||||
* @param text The string segment to parse for inline styles.
|
||||
* @returns An array of React nodes (Text components or strings).
|
||||
*/
|
||||
private static _renderInline(text: string): React.ReactNode[] {
|
||||
const nodes: React.ReactNode[] = [];
|
||||
let lastIndex = 0;
|
||||
// UPDATED Regex: Added <u>.*?<\/u> pattern
|
||||
const inlineRegex =
|
||||
/(\*\*.*?\*\*|\*.*?\*|_.*?_|~~.*?~~|\[.*?\]\(.*?\)|`+.+?`+|<u>.*?<\/u>)/g;
|
||||
let match;
|
||||
|
||||
/**
|
||||
* Renders INLINE markdown elements using an iterative approach.
|
||||
* Supports: **bold**, *italic*, _italic_, ~~strike~~, [link](url), `code`, ``code``, <u>underline</u>
|
||||
* @param text The string segment to parse for inline styles.
|
||||
* @returns An array of React nodes (Text components or strings).
|
||||
*/
|
||||
private static _renderInline(text: string): React.ReactNode[] {
|
||||
const nodes: React.ReactNode[] = [];
|
||||
let lastIndex = 0;
|
||||
// UPDATED Regex: Added <u>.*?<\/u> pattern
|
||||
const inlineRegex = /(\*\*.*?\*\*|\*.*?\*|_.*?_|~~.*?~~|\[.*?\]\(.*?\)|`+.+?`+|<u>.*?<\/u>)/g;
|
||||
let match;
|
||||
|
||||
while ((match = inlineRegex.exec(text)) !== null) {
|
||||
// 1. Add plain text before the match
|
||||
if (match.index > lastIndex) {
|
||||
nodes.push(<Text key={`t-${lastIndex}`}>{text.slice(lastIndex, match.index)}</Text>);
|
||||
}
|
||||
|
||||
const fullMatch = match[0];
|
||||
let renderedNode: React.ReactNode = null;
|
||||
const key = `m-${match.index}`; // Base key for matched part
|
||||
|
||||
// 2. Determine type of match and render accordingly
|
||||
try {
|
||||
if (fullMatch.startsWith('**') && fullMatch.endsWith('**') && fullMatch.length > 4) {
|
||||
renderedNode = <Text key={key} bold>{fullMatch.slice(2, -2)}</Text>;
|
||||
} else if (((fullMatch.startsWith('*') && fullMatch.endsWith('*')) || (fullMatch.startsWith('_') && fullMatch.endsWith('_'))) && fullMatch.length > 2) {
|
||||
renderedNode = <Text key={key} italic>{fullMatch.slice(1, -1)}</Text>;
|
||||
} else if (fullMatch.startsWith('~~') && fullMatch.endsWith('~~') && fullMatch.length > 4) {
|
||||
// Strikethrough as gray text
|
||||
renderedNode = <Text key={key} strikethrough>{fullMatch.slice(2, -2)}</Text>;
|
||||
} else if (fullMatch.startsWith('`') && fullMatch.endsWith('`') && fullMatch.length > 1) {
|
||||
// Code: Try to match varying numbers of backticks
|
||||
const codeMatch = fullMatch.match(/^(`+)(.+?)\1$/s);
|
||||
if (codeMatch && codeMatch[2]) {
|
||||
renderedNode = <Text key={key} color="yellow">{codeMatch[2]}</Text>;
|
||||
} else { // Fallback for simple or non-matching cases
|
||||
renderedNode = <Text key={key} color="yellow">{fullMatch.slice(1, -1)}</Text>;
|
||||
}
|
||||
} else if (fullMatch.startsWith('[') && fullMatch.includes('](') && fullMatch.endsWith(')')) {
|
||||
// Link: Extract text and URL
|
||||
const linkMatch = fullMatch.match(/\[(.*?)\]\((.*?)\)/);
|
||||
if (linkMatch) {
|
||||
const linkText = linkMatch[1];
|
||||
const url = linkMatch[2];
|
||||
// Render link text then URL slightly dimmed/colored
|
||||
renderedNode = (
|
||||
<Text key={key}>
|
||||
{linkText}
|
||||
<Text color="blue"> ({url})</Text>
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
} else if (fullMatch.startsWith('<u>') && fullMatch.endsWith('</u>') && fullMatch.length > 6) {
|
||||
// ***** NEW: Handle underline tag *****
|
||||
// Use slice(3, -4) to remove <u> and </u>
|
||||
renderedNode = <Text key={key} underline>{fullMatch.slice(3, -4)}</Text>;
|
||||
}
|
||||
} catch (e) {
|
||||
// In case of regex or slicing errors, fallback to literal rendering
|
||||
console.error("Error parsing inline markdown part:", fullMatch, e);
|
||||
renderedNode = null; // Ensure fallback below is used
|
||||
}
|
||||
|
||||
|
||||
// 3. Add the rendered node or the literal text if parsing failed
|
||||
nodes.push(renderedNode ?? <Text key={key}>{fullMatch}</Text>);
|
||||
lastIndex = inlineRegex.lastIndex; // Move index past the current match
|
||||
}
|
||||
|
||||
// 4. Add any remaining plain text after the last match
|
||||
if (lastIndex < text.length) {
|
||||
nodes.push(<Text key={`t-${lastIndex}`}>{text.slice(lastIndex)}</Text>);
|
||||
}
|
||||
|
||||
// Filter out potential nulls if any error occurred without fallback
|
||||
return nodes.filter(node => node !== null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper to render a code block.
|
||||
*/
|
||||
private static _renderCodeBlock(key: string, content: string[], lang: string | null): React.ReactNode {
|
||||
// Basic styling for code block
|
||||
return (
|
||||
<Box key={key} borderStyle="round" paddingX={1} borderColor="gray" flexDirection="column">
|
||||
{lang && <Text dimColor> {lang}</Text>}
|
||||
{/* Render each line preserving whitespace (within Text component) */}
|
||||
{content.map((line, idx) => (
|
||||
<Text key={idx}>{line}</Text>
|
||||
))}
|
||||
</Box>
|
||||
while ((match = inlineRegex.exec(text)) !== null) {
|
||||
// 1. Add plain text before the match
|
||||
if (match.index > lastIndex) {
|
||||
nodes.push(
|
||||
<Text key={`t-${lastIndex}`}>
|
||||
{text.slice(lastIndex, match.index)}
|
||||
</Text>,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper to render a list item (ordered or unordered).
|
||||
*/
|
||||
private static _renderListItem(key: string, text: string, type: 'ul' | 'ol', marker: string): React.ReactNode {
|
||||
const renderedText = MarkdownRenderer._renderInline(text); // Allow inline styles in list items
|
||||
const prefix = type === 'ol' ? `${marker} ` : `${marker} `; // e.g., "1. " or "* "
|
||||
const prefixWidth = prefix.length;
|
||||
const fullMatch = match[0];
|
||||
let renderedNode: React.ReactNode = null;
|
||||
const key = `m-${match.index}`; // Base key for matched part
|
||||
|
||||
return (
|
||||
<Box key={key} paddingLeft={1} flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text>{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}>
|
||||
<Text wrap="wrap">{renderedText}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Renders a full markdown string, handling block elements (headers, lists, code blocks)
|
||||
* and applying inline styles. This is the main public static method.
|
||||
* @param text The full markdown string to render.
|
||||
* @returns An array of React nodes representing markdown blocks.
|
||||
*/
|
||||
public static render(text: string): React.ReactNode[] {
|
||||
if (!text) return [];
|
||||
|
||||
const lines = text.split('\n');
|
||||
// Regexes for block elements
|
||||
const headerRegex = /^ *(#{1,4}) +(.*)/;
|
||||
const codeFenceRegex = /^ *(`{3,}|~{3,}) *(\S*?) *$/; // ```lang or ``` or ~~~
|
||||
const ulItemRegex = /^ *([-*+]) +(.*)/; // Unordered list item, captures bullet and text
|
||||
const olItemRegex = /^ *(\d+)\. +(.*)/; // Ordered list item, captures number and text
|
||||
const hrRegex = /^ *([-*_] *){3,} *$/; // Horizontal rule
|
||||
|
||||
const contentBlocks: React.ReactNode[] = [];
|
||||
// State for parsing across lines
|
||||
let inCodeBlock = false;
|
||||
let codeBlockContent: string[] = [];
|
||||
let codeBlockLang: string | null = null;
|
||||
let codeBlockFence = ''; // Store the type of fence used (``` or ~~~)
|
||||
let inListType: 'ul' | 'ol' | null = null; // Track current list type to group items
|
||||
|
||||
lines.forEach((line, index) => {
|
||||
const key = `line-${index}`;
|
||||
|
||||
// --- State 1: Inside a Code Block ---
|
||||
if (inCodeBlock) {
|
||||
const fenceMatch = line.match(codeFenceRegex);
|
||||
// Check for closing fence, matching the opening one and length
|
||||
if (fenceMatch && fenceMatch[1].startsWith(codeBlockFence[0]) && fenceMatch[1].length >= codeBlockFence.length) {
|
||||
// End of code block - render it
|
||||
contentBlocks.push(MarkdownRenderer._renderCodeBlock(key, codeBlockContent, codeBlockLang));
|
||||
// Reset state
|
||||
inCodeBlock = false;
|
||||
codeBlockContent = [];
|
||||
codeBlockLang = null;
|
||||
codeBlockFence = '';
|
||||
inListType = null; // Ensure list context is reset
|
||||
} else {
|
||||
// Add line to current code block content
|
||||
codeBlockContent.push(line);
|
||||
}
|
||||
return; // Process next line
|
||||
}
|
||||
|
||||
// --- State 2: Not Inside a Code Block ---
|
||||
// Check for block element starts in rough order of precedence/commonness
|
||||
const codeFenceMatch = line.match(codeFenceRegex);
|
||||
const headerMatch = line.match(headerRegex);
|
||||
const ulMatch = line.match(ulItemRegex);
|
||||
const olMatch = line.match(olItemRegex);
|
||||
const hrMatch = line.match(hrRegex);
|
||||
|
||||
if (codeFenceMatch) {
|
||||
inCodeBlock = true;
|
||||
codeBlockFence = codeFenceMatch[1];
|
||||
codeBlockLang = codeFenceMatch[2] || null;
|
||||
inListType = null; // Starting code block breaks list
|
||||
} else if (hrMatch) {
|
||||
// Render Horizontal Rule (simple dashed line)
|
||||
// Use box with height and border character, or just Text with dashes
|
||||
contentBlocks.push(<Box key={key}><Text dimColor>---</Text></Box>);
|
||||
inListType = null; // HR breaks list
|
||||
} else if (headerMatch) {
|
||||
const level = headerMatch[1].length;
|
||||
const headerText = headerMatch[2];
|
||||
const renderedHeaderText = MarkdownRenderer._renderInline(headerText);
|
||||
let headerNode: React.ReactNode = null;
|
||||
switch (level) { /* ... (header styling as before) ... */
|
||||
case 1: headerNode = <Text bold color="cyan">{renderedHeaderText}</Text>; break;
|
||||
case 2: headerNode = <Text bold color="blue">{renderedHeaderText}</Text>; break;
|
||||
case 3: headerNode = <Text bold>{renderedHeaderText}</Text>; break;
|
||||
case 4: headerNode = <Text italic color="gray">{renderedHeaderText}</Text>; break;
|
||||
}
|
||||
if (headerNode) contentBlocks.push(<Box key={key}>{headerNode}</Box>);
|
||||
inListType = null; // Header breaks list
|
||||
} else if (ulMatch) {
|
||||
const marker = ulMatch[1]; // *, -, or +
|
||||
const itemText = ulMatch[2];
|
||||
// If previous line was not UL, maybe add spacing? For now, just render item.
|
||||
contentBlocks.push(MarkdownRenderer._renderListItem(key, itemText, 'ul', marker));
|
||||
inListType = 'ul'; // Set/maintain list context
|
||||
} else if (olMatch) {
|
||||
const marker = olMatch[1]; // The number
|
||||
const itemText = olMatch[2];
|
||||
contentBlocks.push(MarkdownRenderer._renderListItem(key, itemText, 'ol', marker));
|
||||
inListType = 'ol'; // Set/maintain list context
|
||||
} else {
|
||||
// --- Regular line (Paragraph or Empty line) ---
|
||||
inListType = null; // Any non-list line breaks the list sequence
|
||||
|
||||
// Render line content if it's not blank, applying inline styles
|
||||
const renderedLine = MarkdownRenderer._renderInline(line);
|
||||
if (renderedLine.length > 0 || line.length > 0) { // Render lines with content or only whitespace
|
||||
contentBlocks.push(
|
||||
<Box key={key}>
|
||||
<Text wrap="wrap">{renderedLine}</Text>
|
||||
</Box>
|
||||
);
|
||||
} else if (line.trim().length === 0) { // Handle specifically empty lines
|
||||
// Add minimal space for blank lines between paragraphs/blocks
|
||||
if (contentBlocks.length > 0 && !inCodeBlock) { // Avoid adding space inside code block state (handled above)
|
||||
const previousBlock = contentBlocks[contentBlocks.length - 1];
|
||||
// Avoid adding multiple blank lines consecutively easily - check if previous was also blank?
|
||||
// For now, add a minimal spacer for any blank line outside code blocks.
|
||||
contentBlocks.push(<Box key={key} height={1} />);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Handle unclosed code block at the end of the input
|
||||
if (inCodeBlock) {
|
||||
contentBlocks.push(MarkdownRenderer._renderCodeBlock(`line-eof`, codeBlockContent, codeBlockLang));
|
||||
// 2. Determine type of match and render accordingly
|
||||
try {
|
||||
if (
|
||||
fullMatch.startsWith('**') &&
|
||||
fullMatch.endsWith('**') &&
|
||||
fullMatch.length > 4
|
||||
) {
|
||||
renderedNode = (
|
||||
<Text key={key} bold>
|
||||
{fullMatch.slice(2, -2)}
|
||||
</Text>
|
||||
);
|
||||
} else if (
|
||||
((fullMatch.startsWith('*') && fullMatch.endsWith('*')) ||
|
||||
(fullMatch.startsWith('_') && fullMatch.endsWith('_'))) &&
|
||||
fullMatch.length > 2
|
||||
) {
|
||||
renderedNode = (
|
||||
<Text key={key} italic>
|
||||
{fullMatch.slice(1, -1)}
|
||||
</Text>
|
||||
);
|
||||
} else if (
|
||||
fullMatch.startsWith('~~') &&
|
||||
fullMatch.endsWith('~~') &&
|
||||
fullMatch.length > 4
|
||||
) {
|
||||
// Strikethrough as gray text
|
||||
renderedNode = (
|
||||
<Text key={key} strikethrough>
|
||||
{fullMatch.slice(2, -2)}
|
||||
</Text>
|
||||
);
|
||||
} else if (
|
||||
fullMatch.startsWith('`') &&
|
||||
fullMatch.endsWith('`') &&
|
||||
fullMatch.length > 1
|
||||
) {
|
||||
// Code: Try to match varying numbers of backticks
|
||||
const codeMatch = fullMatch.match(/^(`+)(.+?)\1$/s);
|
||||
if (codeMatch && codeMatch[2]) {
|
||||
renderedNode = (
|
||||
<Text key={key} color="yellow">
|
||||
{codeMatch[2]}
|
||||
</Text>
|
||||
);
|
||||
} else {
|
||||
// Fallback for simple or non-matching cases
|
||||
renderedNode = (
|
||||
<Text key={key} color="yellow">
|
||||
{fullMatch.slice(1, -1)}
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
} else if (
|
||||
fullMatch.startsWith('[') &&
|
||||
fullMatch.includes('](') &&
|
||||
fullMatch.endsWith(')')
|
||||
) {
|
||||
// Link: Extract text and URL
|
||||
const linkMatch = fullMatch.match(/\[(.*?)\]\((.*?)\)/);
|
||||
if (linkMatch) {
|
||||
const linkText = linkMatch[1];
|
||||
const url = linkMatch[2];
|
||||
// Render link text then URL slightly dimmed/colored
|
||||
renderedNode = (
|
||||
<Text key={key}>
|
||||
{linkText}
|
||||
<Text color="blue"> ({url})</Text>
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
} else if (
|
||||
fullMatch.startsWith('<u>') &&
|
||||
fullMatch.endsWith('</u>') &&
|
||||
fullMatch.length > 6
|
||||
) {
|
||||
// ***** NEW: Handle underline tag *****
|
||||
// Use slice(3, -4) to remove <u> and </u>
|
||||
renderedNode = (
|
||||
<Text key={key} underline>
|
||||
{fullMatch.slice(3, -4)}
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
} catch (e) {
|
||||
// In case of regex or slicing errors, fallback to literal rendering
|
||||
console.error('Error parsing inline markdown part:', fullMatch, e);
|
||||
renderedNode = null; // Ensure fallback below is used
|
||||
}
|
||||
|
||||
return contentBlocks;
|
||||
// 3. Add the rendered node or the literal text if parsing failed
|
||||
nodes.push(renderedNode ?? <Text key={key}>{fullMatch}</Text>);
|
||||
lastIndex = inlineRegex.lastIndex; // Move index past the current match
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Add any remaining plain text after the last match
|
||||
if (lastIndex < text.length) {
|
||||
nodes.push(<Text key={`t-${lastIndex}`}>{text.slice(lastIndex)}</Text>);
|
||||
}
|
||||
|
||||
// Filter out potential nulls if any error occurred without fallback
|
||||
return nodes.filter((node) => node !== null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper to render a code block.
|
||||
*/
|
||||
private static _renderCodeBlock(
|
||||
key: string,
|
||||
content: string[],
|
||||
lang: string | null,
|
||||
): React.ReactNode {
|
||||
// Basic styling for code block
|
||||
return (
|
||||
<Box
|
||||
key={key}
|
||||
borderStyle="round"
|
||||
paddingX={1}
|
||||
borderColor="gray"
|
||||
flexDirection="column"
|
||||
>
|
||||
{lang && <Text dimColor> {lang}</Text>}
|
||||
{/* Render each line preserving whitespace (within Text component) */}
|
||||
{content.map((line, idx) => (
|
||||
<Text key={idx}>{line}</Text>
|
||||
))}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper to render a list item (ordered or unordered).
|
||||
*/
|
||||
private static _renderListItem(
|
||||
key: string,
|
||||
text: string,
|
||||
type: 'ul' | 'ol',
|
||||
marker: string,
|
||||
): React.ReactNode {
|
||||
const renderedText = MarkdownRenderer._renderInline(text); // Allow inline styles in list items
|
||||
const prefix = type === 'ol' ? `${marker} ` : `${marker} `; // e.g., "1. " or "* "
|
||||
const prefixWidth = prefix.length;
|
||||
|
||||
return (
|
||||
<Box key={key} paddingLeft={1} flexDirection="row">
|
||||
<Box width={prefixWidth}>
|
||||
<Text>{prefix}</Text>
|
||||
</Box>
|
||||
<Box flexGrow={1}>
|
||||
<Text wrap="wrap">{renderedText}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Renders a full markdown string, handling block elements (headers, lists, code blocks)
|
||||
* and applying inline styles. This is the main public static method.
|
||||
* @param text The full markdown string to render.
|
||||
* @returns An array of React nodes representing markdown blocks.
|
||||
*/
|
||||
public static render(text: string): React.ReactNode[] {
|
||||
if (!text) return [];
|
||||
|
||||
const lines = text.split('\n');
|
||||
// Regexes for block elements
|
||||
const headerRegex = /^ *(#{1,4}) +(.*)/;
|
||||
const codeFenceRegex = /^ *(`{3,}|~{3,}) *(\S*?) *$/; // ```lang or ``` or ~~~
|
||||
const ulItemRegex = /^ *([-*+]) +(.*)/; // Unordered list item, captures bullet and text
|
||||
const olItemRegex = /^ *(\d+)\. +(.*)/; // Ordered list item, captures number and text
|
||||
const hrRegex = /^ *([-*_] *){3,} *$/; // Horizontal rule
|
||||
|
||||
const contentBlocks: React.ReactNode[] = [];
|
||||
// State for parsing across lines
|
||||
let inCodeBlock = false;
|
||||
let codeBlockContent: string[] = [];
|
||||
let codeBlockLang: string | null = null;
|
||||
let codeBlockFence = ''; // Store the type of fence used (``` or ~~~)
|
||||
let inListType: 'ul' | 'ol' | null = null; // Track current list type to group items
|
||||
|
||||
lines.forEach((line, index) => {
|
||||
const key = `line-${index}`;
|
||||
|
||||
// --- State 1: Inside a Code Block ---
|
||||
if (inCodeBlock) {
|
||||
const fenceMatch = line.match(codeFenceRegex);
|
||||
// Check for closing fence, matching the opening one and length
|
||||
if (
|
||||
fenceMatch &&
|
||||
fenceMatch[1].startsWith(codeBlockFence[0]) &&
|
||||
fenceMatch[1].length >= codeBlockFence.length
|
||||
) {
|
||||
// End of code block - render it
|
||||
contentBlocks.push(
|
||||
MarkdownRenderer._renderCodeBlock(
|
||||
key,
|
||||
codeBlockContent,
|
||||
codeBlockLang,
|
||||
),
|
||||
);
|
||||
// Reset state
|
||||
inCodeBlock = false;
|
||||
codeBlockContent = [];
|
||||
codeBlockLang = null;
|
||||
codeBlockFence = '';
|
||||
inListType = null; // Ensure list context is reset
|
||||
} else {
|
||||
// Add line to current code block content
|
||||
codeBlockContent.push(line);
|
||||
}
|
||||
return; // Process next line
|
||||
}
|
||||
|
||||
// --- State 2: Not Inside a Code Block ---
|
||||
// Check for block element starts in rough order of precedence/commonness
|
||||
const codeFenceMatch = line.match(codeFenceRegex);
|
||||
const headerMatch = line.match(headerRegex);
|
||||
const ulMatch = line.match(ulItemRegex);
|
||||
const olMatch = line.match(olItemRegex);
|
||||
const hrMatch = line.match(hrRegex);
|
||||
|
||||
if (codeFenceMatch) {
|
||||
inCodeBlock = true;
|
||||
codeBlockFence = codeFenceMatch[1];
|
||||
codeBlockLang = codeFenceMatch[2] || null;
|
||||
inListType = null; // Starting code block breaks list
|
||||
} else if (hrMatch) {
|
||||
// Render Horizontal Rule (simple dashed line)
|
||||
// Use box with height and border character, or just Text with dashes
|
||||
contentBlocks.push(
|
||||
<Box key={key}>
|
||||
<Text dimColor>---</Text>
|
||||
</Box>,
|
||||
);
|
||||
inListType = null; // HR breaks list
|
||||
} else if (headerMatch) {
|
||||
const level = headerMatch[1].length;
|
||||
const headerText = headerMatch[2];
|
||||
const renderedHeaderText = MarkdownRenderer._renderInline(headerText);
|
||||
let headerNode: React.ReactNode = null;
|
||||
switch (level /* ... (header styling as before) ... */) {
|
||||
case 1:
|
||||
headerNode = (
|
||||
<Text bold color="cyan">
|
||||
{renderedHeaderText}
|
||||
</Text>
|
||||
);
|
||||
break;
|
||||
case 2:
|
||||
headerNode = (
|
||||
<Text bold color="blue">
|
||||
{renderedHeaderText}
|
||||
</Text>
|
||||
);
|
||||
break;
|
||||
case 3:
|
||||
headerNode = <Text bold>{renderedHeaderText}</Text>;
|
||||
break;
|
||||
case 4:
|
||||
headerNode = (
|
||||
<Text italic color="gray">
|
||||
{renderedHeaderText}
|
||||
</Text>
|
||||
);
|
||||
break;
|
||||
}
|
||||
if (headerNode) contentBlocks.push(<Box key={key}>{headerNode}</Box>);
|
||||
inListType = null; // Header breaks list
|
||||
} else if (ulMatch) {
|
||||
const marker = ulMatch[1]; // *, -, or +
|
||||
const itemText = ulMatch[2];
|
||||
// If previous line was not UL, maybe add spacing? For now, just render item.
|
||||
contentBlocks.push(
|
||||
MarkdownRenderer._renderListItem(key, itemText, 'ul', marker),
|
||||
);
|
||||
inListType = 'ul'; // Set/maintain list context
|
||||
} else if (olMatch) {
|
||||
const marker = olMatch[1]; // The number
|
||||
const itemText = olMatch[2];
|
||||
contentBlocks.push(
|
||||
MarkdownRenderer._renderListItem(key, itemText, 'ol', marker),
|
||||
);
|
||||
inListType = 'ol'; // Set/maintain list context
|
||||
} else {
|
||||
// --- Regular line (Paragraph or Empty line) ---
|
||||
inListType = null; // Any non-list line breaks the list sequence
|
||||
|
||||
// Render line content if it's not blank, applying inline styles
|
||||
const renderedLine = MarkdownRenderer._renderInline(line);
|
||||
if (renderedLine.length > 0 || line.length > 0) {
|
||||
// Render lines with content or only whitespace
|
||||
contentBlocks.push(
|
||||
<Box key={key}>
|
||||
<Text wrap="wrap">{renderedLine}</Text>
|
||||
</Box>,
|
||||
);
|
||||
} else if (line.trim().length === 0) {
|
||||
// Handle specifically empty lines
|
||||
// Add minimal space for blank lines between paragraphs/blocks
|
||||
if (contentBlocks.length > 0 && !inCodeBlock) {
|
||||
// Avoid adding space inside code block state (handled above)
|
||||
const previousBlock = contentBlocks[contentBlocks.length - 1];
|
||||
// Avoid adding multiple blank lines consecutively easily - check if previous was also blank?
|
||||
// For now, add a minimal spacer for any blank line outside code blocks.
|
||||
contentBlocks.push(<Box key={key} height={1} />);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Handle unclosed code block at the end of the input
|
||||
if (inCodeBlock) {
|
||||
contentBlocks.push(
|
||||
MarkdownRenderer._renderCodeBlock(
|
||||
`line-eof`,
|
||||
codeBlockContent,
|
||||
codeBlockLang,
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
return contentBlocks;
|
||||
}
|
||||
}
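An illustrative wrapper showing how `MarkdownRenderer.render` is typically consumed from an Ink component; the wrapper component and its import path are assumptions, not part of this diff:

```tsx
import React from 'react';
import { Box } from 'ink';
import { MarkdownRenderer } from '../../utils/MarkdownRenderer.js'; // path assumed

// render() returns an array of keyed block nodes, so it can be dropped
// straight into a column Box as children.
const MarkdownBlock: React.FC<{ source: string }> = ({ source }) => (
  <Box flexDirection="column">{MarkdownRenderer.render(source)}</Box>
);

export default MarkdownBlock;
```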
@@ -1,19 +1,18 @@
|
|||
import { promises as fs } from 'fs';
|
||||
import { SchemaUnion, Type } from "@google/genai"; // Assuming these types exist
|
||||
import { GeminiClient } from "../core/gemini-client.js"; // Assuming this path
|
||||
import { SchemaUnion, Type } from '@google/genai'; // Assuming these types exist
|
||||
import { GeminiClient } from '../core/gemini-client.js'; // Assuming this path
|
||||
import { exec } from 'child_process'; // Needed for Windows process check
|
||||
import { promisify } from 'util'; // To promisify exec
|
||||
|
||||
// Promisify child_process.exec for easier async/await usage
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
|
||||
// Define the expected interface for the AI client dependency
|
||||
export interface AiClient {
|
||||
generateJson(
|
||||
prompt: any[], // Keep flexible or define a stricter prompt structure type
|
||||
schema: SchemaUnion
|
||||
): Promise<any>; // Ideally, specify the expected JSON structure TAnalysisResult | TAnalysisFailure
|
||||
generateJson(
|
||||
prompt: any[], // Keep flexible or define a stricter prompt structure type
|
||||
schema: SchemaUnion,
|
||||
): Promise<any>; // Ideally, specify the expected JSON structure TAnalysisResult | TAnalysisFailure
|
||||
}
|
||||
|
||||
// Identifier for the background process (e.g., PID)
@@ -22,232 +21,290 @@ export type ProcessHandle = number | string | unknown;
|
|||
|
||||
// Represents the structure expected from a successful LLM analysis call
|
||||
export interface AnalysisResult {
|
||||
summary: string;
|
||||
inferredStatus: 'Running' | 'SuccessReported' | 'ErrorReported' | 'Unknown';
|
||||
summary: string;
|
||||
inferredStatus: 'Running' | 'SuccessReported' | 'ErrorReported' | 'Unknown';
|
||||
}
|
||||
|
||||
// Represents the structure returned when the LLM analysis itself fails
|
||||
export interface AnalysisFailure {
|
||||
error: string;
|
||||
inferredStatus: 'AnalysisFailed';
|
||||
error: string;
|
||||
inferredStatus: 'AnalysisFailed';
|
||||
}
|
||||
|
||||
// Type guard to check if the result is a failure object
|
||||
function isAnalysisFailure(result: AnalysisResult | AnalysisFailure): result is AnalysisFailure {
|
||||
return (result as AnalysisFailure).inferredStatus === 'AnalysisFailed';
|
||||
function isAnalysisFailure(
|
||||
result: AnalysisResult | AnalysisFailure,
|
||||
): result is AnalysisFailure {
|
||||
return (result as AnalysisFailure).inferredStatus === 'AnalysisFailed';
|
||||
}
|
||||
|
||||
// Represents the final outcome after polling is complete (or failed/timed out)
|
||||
export interface FinalAnalysisOutcome {
|
||||
status: string; // e.g., 'SuccessReported', 'ErrorReported', 'ProcessEnded_SuccessReported', 'TimedOut_Running', 'AnalysisFailed'
|
||||
summary: string; // Final summary or error message
|
||||
status: string; // e.g., 'SuccessReported', 'ErrorReported', 'ProcessEnded_SuccessReported', 'TimedOut_Running', 'AnalysisFailed'
|
||||
summary: string; // Final summary or error message
|
||||
}
|
||||
|
||||
export class BackgroundTerminalAnalyzer {
|
||||
private ai: AiClient;
|
||||
// Make polling parameters configurable via constructor
|
||||
private pollIntervalMs: number;
|
||||
private maxAttempts: number;
|
||||
private initialDelayMs: number;
|
||||
private ai: AiClient;
|
||||
// Make polling parameters configurable via constructor
|
||||
private pollIntervalMs: number;
|
||||
private maxAttempts: number;
|
||||
private initialDelayMs: number;
|
||||
|
||||
// --- Dependency Injection & Configuration ---
|
||||
constructor(
|
||||
aiClient?: AiClient, // Allow injecting AiClient, default to GeminiClient
|
||||
options: {
|
||||
pollIntervalMs?: number,
|
||||
maxAttempts?: number,
|
||||
initialDelayMs?: number
|
||||
} = {} // Provide default options
|
||||
) {
|
||||
this.ai = aiClient || new GeminiClient(); // Use injected client or default
|
||||
this.pollIntervalMs = options.pollIntervalMs ?? 5000; // Default 5 seconds
|
||||
this.maxAttempts = options.maxAttempts ?? 6; // Default 6 attempts (approx 30s total)
|
||||
this.initialDelayMs = options.initialDelayMs ?? 500; // Default 0.5s initial delay
|
||||
}
|
||||
// --- Dependency Injection & Configuration ---
|
||||
constructor(
|
||||
aiClient?: AiClient, // Allow injecting AiClient, default to GeminiClient
|
||||
options: {
|
||||
pollIntervalMs?: number;
|
||||
maxAttempts?: number;
|
||||
initialDelayMs?: number;
|
||||
} = {}, // Provide default options
|
||||
) {
|
||||
this.ai = aiClient || new GeminiClient(); // Use injected client or default
|
||||
this.pollIntervalMs = options.pollIntervalMs ?? 5000; // Default 5 seconds
|
||||
this.maxAttempts = options.maxAttempts ?? 6; // Default 6 attempts (approx 30s total)
|
||||
this.initialDelayMs = options.initialDelayMs ?? 500; // Default 0.5s initial delay
|
||||
}
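A hypothetical instantiation showing how the constructor defaults interact with the options bag; omitting the first argument falls back to a new `GeminiClient`, and the temp-file paths and command in the comment are placeholders:

```ts
// Poll every 2s for up to 10 attempts instead of the 5s / 6-attempt defaults.
const analyzer = new BackgroundTerminalAnalyzer(undefined, {
  pollIntervalMs: 2000,
  maxAttempts: 10,
});

// Later, after spawning a background command that tees output to temp files:
// const outcome = await analyzer.analyze(pid, '/tmp/out.log', '/tmp/err.log', 'npm run build');
```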
|
||||
|
||||
/**
|
||||
* Polls the output of a background process using an LLM
|
||||
* until a conclusive status is determined or timeout occurs.
|
||||
* @param pid The handle/identifier of the background process (typically PID number).
|
||||
* @param tempStdoutFilePath Path to the temporary file capturing stdout.
|
||||
* @param tempStderrFilePath Path to the temporary file capturing stderr.
|
||||
* @param command The command string that was executed (for context in prompts).
|
||||
* @returns A promise resolving to the final analysis outcome.
|
||||
*/
|
||||
public async analyze(
|
||||
pid: ProcessHandle,
|
||||
tempStdoutFilePath: string,
|
||||
tempStderrFilePath: string,
|
||||
command: string
|
||||
): Promise<FinalAnalysisOutcome> {
|
||||
/**
|
||||
* Polls the output of a background process using an LLM
|
||||
* until a conclusive status is determined or timeout occurs.
|
||||
* @param pid The handle/identifier of the background process (typically PID number).
|
||||
* @param tempStdoutFilePath Path to the temporary file capturing stdout.
|
||||
* @param tempStderrFilePath Path to the temporary file capturing stderr.
|
||||
* @param command The command string that was executed (for context in prompts).
|
||||
* @returns A promise resolving to the final analysis outcome.
|
||||
*/
|
||||
public async analyze(
|
||||
pid: ProcessHandle,
|
||||
tempStdoutFilePath: string,
|
||||
tempStderrFilePath: string,
|
||||
command: string,
|
||||
): Promise<FinalAnalysisOutcome> {
|
||||
// --- Initial Delay ---
|
||||
// Wait briefly before the first check to allow the process to initialize
|
||||
// and potentially write initial output.
|
||||
await new Promise((resolve) => setTimeout(resolve, this.initialDelayMs));
|
||||
|
||||
// --- Initial Delay ---
|
||||
// Wait briefly before the first check to allow the process to initialize
|
||||
// and potentially write initial output.
|
||||
await new Promise(resolve => setTimeout(resolve, this.initialDelayMs));
|
||||
let attempts = 0;
|
||||
let lastAnalysisResult: AnalysisResult | AnalysisFailure | null = null;
|
||||
|
||||
let attempts = 0;
|
||||
let lastAnalysisResult: AnalysisResult | AnalysisFailure | null = null;
|
||||
while (attempts < this.maxAttempts) {
|
||||
attempts++;
|
||||
let currentStdout: string = '';
|
||||
let currentStderr: string = '';
|
||||
|
||||
while (attempts < this.maxAttempts) {
|
||||
attempts++;
|
||||
let currentStdout: string = '';
|
||||
let currentStderr: string = '';
|
||||
|
||||
// --- Robust File Reading ---
|
||||
try {
|
||||
currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8');
|
||||
} catch (error: any) {
|
||||
// If file doesn't exist yet or isn't readable, treat as empty, but log warning
|
||||
if (error.code !== 'ENOENT') {
|
||||
console.warn(`Attempt ${attempts}: Failed to read stdout file ${tempStdoutFilePath}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
try {
|
||||
currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8');
|
||||
} catch (error: any) {
|
||||
if (error.code !== 'ENOENT') {
|
||||
console.warn(`Attempt ${attempts}: Failed to read stderr file ${tempStderrFilePath}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// --- Process Status Check ---
|
||||
let isRunning = false;
|
||||
try {
|
||||
// Check if process is running *before* the final analysis if it seems to have ended
|
||||
isRunning = await this.isProcessRunning(pid);
|
||||
if (!isRunning) {
|
||||
// Reread files one last time in case output was written just before exit
|
||||
try { currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8'); } catch {}
|
||||
try { currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8'); } catch {}
|
||||
|
||||
lastAnalysisResult = await this.analyzeOutputWithLLM(currentStdout, currentStderr, command);
|
||||
|
||||
if (isAnalysisFailure(lastAnalysisResult)) {
|
||||
return { status: 'ProcessEnded_AnalysisFailed', summary: `Process ended. Final analysis failed: ${lastAnalysisResult.error}` };
|
||||
}
|
||||
// Append ProcessEnded to the status determined by the final analysis
|
||||
return { status: 'ProcessEnded_' + lastAnalysisResult.inferredStatus, summary: `Process ended. Final analysis summary: ${lastAnalysisResult.summary}` };
|
||||
}
|
||||
} catch (procCheckError: any) {
|
||||
// Log the error but allow polling to continue, as log analysis might still be useful
|
||||
console.warn(`Could not check process status for PID ${pid} on attempt ${attempts}: ${procCheckError.message}`);
|
||||
// Decide if you want to bail out here or continue analysis based on logs only
|
||||
// For now, we continue.
|
||||
}
|
||||
|
||||
// --- LLM Analysis ---
|
||||
lastAnalysisResult = await this.analyzeOutputWithLLM(currentStdout, currentStderr, command);
|
||||
|
||||
if (isAnalysisFailure(lastAnalysisResult)) {
|
||||
console.error(`LLM Analysis failed for PID ${pid} on attempt ${attempts}:`, lastAnalysisResult.error);
|
||||
// Stop polling on analysis failure, returning the specific failure status
|
||||
return { status: lastAnalysisResult.inferredStatus, summary: lastAnalysisResult.error };
|
||||
}
|
||||
|
||||
// --- Exit Conditions ---
|
||||
if (lastAnalysisResult.inferredStatus === 'SuccessReported' || lastAnalysisResult.inferredStatus === 'ErrorReported') {
|
||||
return { status: lastAnalysisResult.inferredStatus, summary: lastAnalysisResult.summary };
|
||||
}
|
||||
|
||||
// Heuristic: If the process seems stable and 'Running' after several checks,
|
||||
// return that status without waiting for the full timeout. Adjust threshold as needed.
|
||||
const runningExitThreshold = Math.floor(this.maxAttempts / 3) + 1; // e.g., exit after attempt 3 if maxAttempts is 6
|
||||
if (attempts >= runningExitThreshold && lastAnalysisResult.inferredStatus === 'Running') {
|
||||
return { status: lastAnalysisResult.inferredStatus, summary: lastAnalysisResult.summary };
|
||||
}
|
||||
|
||||
// --- Wait before next poll ---
|
||||
if (attempts < this.maxAttempts) {
|
||||
await new Promise(resolve => setTimeout(resolve, this.pollIntervalMs));
|
||||
}
|
||||
} // End while loop
|
||||
|
||||
// --- Timeout Condition ---
|
||||
console.warn(`Polling timed out for PID ${pid} after ${this.maxAttempts} attempts.`);
|
||||
|
||||
// Determine final status based on the last successful analysis (if any)
|
||||
const finalStatus = (lastAnalysisResult && !isAnalysisFailure(lastAnalysisResult))
|
||||
? `TimedOut_${lastAnalysisResult.inferredStatus}` // e.g., TimedOut_Running
|
||||
: 'TimedOut_AnalysisFailed'; // If last attempt failed or no analysis succeeded
|
||||
|
||||
const finalSummary = (lastAnalysisResult && !isAnalysisFailure(lastAnalysisResult))
|
||||
? `Polling timed out after ${this.maxAttempts} attempts. Last known summary: ${lastAnalysisResult.summary}`
|
||||
: (lastAnalysisResult && isAnalysisFailure(lastAnalysisResult))
|
||||
? `Polling timed out; last analysis attempt failed: ${lastAnalysisResult.error}`
|
||||
: `Polling timed out after ${this.maxAttempts} attempts without any successful analysis.`;
|
||||
|
||||
return { status: finalStatus, summary: finalSummary };
|
||||
}
|
||||
|
||||
// --- Actual Implementation of isProcessRunning ---
|
||||
/**
|
||||
* Checks if the background process is still running using OS-specific methods.
|
||||
* @param pid Process handle/identifier (expects a number for standard checks).
|
||||
* @returns True if running, false otherwise.
|
||||
* @throws Error if the check itself fails critically (e.g., command not found, permissions).
|
||||
*/
|
||||
private async isProcessRunning(pid: ProcessHandle): Promise<boolean> {
|
||||
if (typeof pid !== 'number' || !Number.isInteger(pid) || pid <= 0) {
|
||||
console.warn(`isProcessRunning: Invalid PID provided (${pid}). Assuming not running.`);
|
||||
return false;
|
||||
// --- Robust File Reading ---
|
||||
try {
|
||||
currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8');
|
||||
} catch (error: any) {
|
||||
// If file doesn't exist yet or isn't readable, treat as empty, but log warning
|
||||
if (error.code !== 'ENOENT') {
|
||||
console.warn(
|
||||
`Attempt ${attempts}: Failed to read stdout file ${tempStdoutFilePath}: ${error.message}`,
|
||||
);
|
||||
}
|
||||
|
||||
try {
|
||||
if (process.platform === 'win32') {
|
||||
// Windows: Use tasklist command
|
||||
const command = `tasklist /FI "PID eq ${pid}" /NH`; // /NH for no header
|
||||
const { stdout } = await execAsync(command);
|
||||
// Check if the output contains the process information (it will have the image name if found)
|
||||
return stdout.toLowerCase().includes('.exe'); // A simple check, adjust if needed
|
||||
} else {
|
||||
// Linux/macOS/Unix-like: Use kill -0 signal
|
||||
// process.kill sends signal 0 to check existence without killing
|
||||
process.kill(pid, 0);
|
||||
return true; // If no error is thrown, process exists
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ESRCH') {
|
||||
// ESRCH: Standard error code for "No such process" on Unix-like systems
|
||||
return false;
|
||||
} else if (process.platform === 'win32' && error.message.includes('No tasks are running')) {
|
||||
// tasklist specific error when PID doesn't exist
|
||||
return false;
|
||||
} else {
|
||||
// Other errors (e.g., EPERM - permission denied) mean we couldn't determine status.
|
||||
// Re-throwing might be appropriate depending on desired behavior.
|
||||
// Here, we log it and cautiously return true, assuming it *might* still be running.
|
||||
console.warn(`isProcessRunning(${pid}) encountered error: ${error.message}. Assuming process might still exist.`);
|
||||
// Or you could throw the error: throw new Error(`Failed to check process status for PID ${pid}: ${error.message}`);
|
||||
return true; // Cautious assumption
|
||||
}
|
||||
}
|
||||
try {
|
||||
currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8');
|
||||
} catch (error: any) {
|
||||
if (error.code !== 'ENOENT') {
|
||||
console.warn(
|
||||
`Attempt ${attempts}: Failed to read stderr file ${tempStderrFilePath}: ${error.message}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- LLM Analysis Method (largely unchanged but added validation robustness) ---
|
||||
private async analyzeOutputWithLLM(
|
||||
stdout: string,
|
||||
stderr: string,
|
||||
command: string
|
||||
): Promise<AnalysisResult | AnalysisFailure> {
|
||||
try {
|
||||
const schema: SchemaUnion = { /* ... schema definition remains the same ... */
|
||||
type: Type.OBJECT,
|
||||
properties: {
|
||||
summary: {
|
||||
type: Type.STRING,
|
||||
description: "A concise interpretation of significant events, progress, final results, or errors found in the process's stdout and stderr. Summarizes what the logs indicate happened. Should be formatted as markdown."
|
||||
},
|
||||
inferredStatus: {
|
||||
type: Type.STRING,
|
||||
description: "The inferred status based *only* on analyzing the provided log content. Possible values: 'Running' (logs show ongoing activity without completion/error), 'SuccessReported' (logs indicate successful completion or final positive result), 'ErrorReported' (logs indicate an error or failure), 'Unknown' (status cannot be clearly determined from the log content).",
|
||||
enum: ['Running', 'SuccessReported', 'ErrorReported', 'Unknown']
|
||||
}
|
||||
},
|
||||
required: ['summary', 'inferredStatus']
|
||||
// --- Process Status Check ---
|
||||
let isRunning = false;
|
||||
try {
|
||||
// Check if process is running *before* the final analysis if it seems to have ended
|
||||
isRunning = await this.isProcessRunning(pid);
|
||||
if (!isRunning) {
|
||||
// Reread files one last time in case output was written just before exit
|
||||
try {
|
||||
currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8');
|
||||
} catch {}
|
||||
try {
|
||||
currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8');
|
||||
} catch {}
|
||||
|
||||
lastAnalysisResult = await this.analyzeOutputWithLLM(
|
||||
currentStdout,
|
||||
currentStderr,
|
||||
command,
|
||||
);
|
||||
|
||||
if (isAnalysisFailure(lastAnalysisResult)) {
|
||||
return {
|
||||
status: 'ProcessEnded_AnalysisFailed',
|
||||
summary: `Process ended. Final analysis failed: ${lastAnalysisResult.error}`,
|
||||
};
|
||||
}
|
||||
// Append ProcessEnded to the status determined by the final analysis
|
||||
return {
|
||||
status: 'ProcessEnded_' + lastAnalysisResult.inferredStatus,
|
||||
summary: `Process ended. Final analysis summary: ${lastAnalysisResult.summary}`,
|
||||
};
|
||||
}
|
||||
} catch (procCheckError: any) {
|
||||
// Log the error but allow polling to continue, as log analysis might still be useful
|
||||
console.warn(
|
||||
`Could not check process status for PID ${pid} on attempt ${attempts}: ${procCheckError.message}`,
|
||||
);
|
||||
// Decide if you want to bail out here or continue analysis based on logs only
|
||||
// For now, we continue.
|
||||
}
|
||||
|
||||
const prompt = `**Analyze Background Process Logs**
|
||||
// --- LLM Analysis ---
|
||||
lastAnalysisResult = await this.analyzeOutputWithLLM(
|
||||
currentStdout,
|
||||
currentStderr,
|
||||
command,
|
||||
);
|
||||
|
||||
if (isAnalysisFailure(lastAnalysisResult)) {
|
||||
console.error(
|
||||
`LLM Analysis failed for PID ${pid} on attempt ${attempts}:`,
|
||||
lastAnalysisResult.error,
|
||||
);
|
||||
// Stop polling on analysis failure, returning the specific failure status
|
||||
return {
|
||||
status: lastAnalysisResult.inferredStatus,
|
||||
summary: lastAnalysisResult.error,
|
||||
};
|
||||
}
|
||||
|
||||
// --- Exit Conditions ---
|
||||
if (
|
||||
lastAnalysisResult.inferredStatus === 'SuccessReported' ||
|
||||
lastAnalysisResult.inferredStatus === 'ErrorReported'
|
||||
) {
|
||||
return {
|
||||
status: lastAnalysisResult.inferredStatus,
|
||||
summary: lastAnalysisResult.summary,
|
||||
};
|
||||
}
|
||||
|
||||
// Heuristic: If the process seems stable and 'Running' after several checks,
|
||||
// return that status without waiting for the full timeout. Adjust threshold as needed.
|
||||
const runningExitThreshold = Math.floor(this.maxAttempts / 3) + 1; // e.g., exit after attempt 3 if maxAttempts is 6
|
||||
if (
|
||||
attempts >= runningExitThreshold &&
|
||||
lastAnalysisResult.inferredStatus === 'Running'
|
||||
) {
|
||||
return {
|
||||
status: lastAnalysisResult.inferredStatus,
|
||||
summary: lastAnalysisResult.summary,
|
||||
};
|
||||
}
|
||||
|
||||
// --- Wait before next poll ---
|
||||
if (attempts < this.maxAttempts) {
|
||||
await new Promise((resolve) =>
|
||||
setTimeout(resolve, this.pollIntervalMs),
|
||||
);
|
||||
}
|
||||
} // End while loop
|
||||
|
||||
// --- Timeout Condition ---
|
||||
console.warn(
|
||||
`Polling timed out for PID ${pid} after ${this.maxAttempts} attempts.`,
|
||||
);
|
||||
|
||||
// Determine final status based on the last successful analysis (if any)
|
||||
const finalStatus =
|
||||
lastAnalysisResult && !isAnalysisFailure(lastAnalysisResult)
|
||||
? `TimedOut_${lastAnalysisResult.inferredStatus}` // e.g., TimedOut_Running
|
||||
: 'TimedOut_AnalysisFailed'; // If last attempt failed or no analysis succeeded
|
||||
|
||||
const finalSummary =
|
||||
lastAnalysisResult && !isAnalysisFailure(lastAnalysisResult)
|
||||
? `Polling timed out after ${this.maxAttempts} attempts. Last known summary: ${lastAnalysisResult.summary}`
|
||||
: lastAnalysisResult && isAnalysisFailure(lastAnalysisResult)
|
||||
? `Polling timed out; last analysis attempt failed: ${lastAnalysisResult.error}`
|
||||
: `Polling timed out after ${this.maxAttempts} attempts without any successful analysis.`;
|
||||
|
||||
return { status: finalStatus, summary: finalSummary };
|
||||
}
|
||||
|
||||
// --- Actual Implementation of isProcessRunning ---
|
||||
/**
|
||||
* Checks if the background process is still running using OS-specific methods.
|
||||
* @param pid Process handle/identifier (expects a number for standard checks).
|
||||
* @returns True if running, false otherwise.
|
||||
* @throws Error if the check itself fails critically (e.g., command not found, permissions).
|
||||
*/
|
||||
private async isProcessRunning(pid: ProcessHandle): Promise<boolean> {
|
||||
if (typeof pid !== 'number' || !Number.isInteger(pid) || pid <= 0) {
|
||||
console.warn(
|
||||
`isProcessRunning: Invalid PID provided (${pid}). Assuming not running.`,
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
if (process.platform === 'win32') {
|
||||
// Windows: Use tasklist command
|
||||
const command = `tasklist /FI "PID eq ${pid}" /NH`; // /NH for no header
|
||||
const { stdout } = await execAsync(command);
|
||||
// Check if the output contains the process information (it will have the image name if found)
|
||||
return stdout.toLowerCase().includes('.exe'); // A simple check, adjust if needed
|
||||
} else {
|
||||
// Linux/macOS/Unix-like: Use kill -0 signal
|
||||
// process.kill sends signal 0 to check existence without killing
|
||||
process.kill(pid, 0);
|
||||
return true; // If no error is thrown, process exists
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ESRCH') {
|
||||
// ESRCH: Standard error code for "No such process" on Unix-like systems
|
||||
return false;
|
||||
} else if (
|
||||
process.platform === 'win32' &&
|
||||
error.message.includes('No tasks are running')
|
||||
) {
|
||||
// tasklist specific error when PID doesn't exist
|
||||
return false;
|
||||
} else {
|
||||
// Other errors (e.g., EPERM - permission denied) mean we couldn't determine status.
|
||||
// Re-throwing might be appropriate depending on desired behavior.
|
||||
// Here, we log it and cautiously return true, assuming it *might* still be running.
|
||||
console.warn(
|
||||
`isProcessRunning(${pid}) encountered error: ${error.message}. Assuming process might still exist.`,
|
||||
);
|
||||
// Or you could throw the error: throw new Error(`Failed to check process status for PID ${pid}: ${error.message}`);
|
||||
return true; // Cautious assumption
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- LLM Analysis Method (largely unchanged but added validation robustness) ---
|
||||
private async analyzeOutputWithLLM(
|
||||
stdout: string,
|
||||
stderr: string,
|
||||
command: string,
|
||||
): Promise<AnalysisResult | AnalysisFailure> {
|
||||
try {
|
||||
const schema: SchemaUnion = {
|
||||
/* ... schema definition remains the same ... */ type: Type.OBJECT,
|
||||
properties: {
|
||||
summary: {
|
||||
type: Type.STRING,
|
||||
description:
|
||||
"A concise interpretation of significant events, progress, final results, or errors found in the process's stdout and stderr. Summarizes what the logs indicate happened. Should be formatted as markdown.",
|
||||
},
|
||||
inferredStatus: {
|
||||
type: Type.STRING,
|
||||
description:
|
||||
"The inferred status based *only* on analyzing the provided log content. Possible values: 'Running' (logs show ongoing activity without completion/error), 'SuccessReported' (logs indicate successful completion or final positive result), 'ErrorReported' (logs indicate an error or failure), 'Unknown' (status cannot be clearly determined from the log content).",
|
||||
enum: ['Running', 'SuccessReported', 'ErrorReported', 'Unknown'],
|
||||
},
|
||||
},
|
||||
required: ['summary', 'inferredStatus'],
|
||||
};
|
||||
|
||||
const prompt = `**Analyze Background Process Logs**
|
||||
|
||||
**Context:** A command (\`${command}\`) was executed in the background. You are analyzing the standard output (stdout) and standard error (stderr) collected so far to understand its progress and outcome. This analysis will be used to inform a user about what the command did.
|
||||
|
||||
|
@ -277,49 +334,85 @@ Based *only* on the provided stdout and stderr:
|
|||
3. **Format Output:** Return the results as a JSON object adhering strictly to the following schema:
|
||||
|
||||
\`\`\`json
|
||||
${JSON.stringify({ // Generate the schema JSON string for the prompt context
|
||||
type: "object",
|
||||
properties: {
|
||||
summary: { type: "string", description: "Concise markdown summary of log interpretation." },
|
||||
inferredStatus: { type: "string", enum: ["Running", "SuccessReported", "ErrorReported", "Unknown"], description: "Status inferred *only* from log content." }
|
||||
},
|
||||
required: ["summary", "inferredStatus"]
|
||||
}, null, 2)}
|
||||
${JSON.stringify(
|
||||
{
|
||||
// Generate the schema JSON string for the prompt context
|
||||
type: 'object',
|
||||
properties: {
|
||||
summary: {
|
||||
type: 'string',
|
||||
description: 'Concise markdown summary of log interpretation.',
|
||||
},
|
||||
inferredStatus: {
|
||||
type: 'string',
|
||||
enum: ['Running', 'SuccessReported', 'ErrorReported', 'Unknown'],
|
||||
description: 'Status inferred *only* from log content.',
|
||||
},
|
||||
},
|
||||
required: ['summary', 'inferredStatus'],
|
||||
},
|
||||
null,
|
||||
2,
|
||||
)}
|
||||
\`\`\`
|
||||
|
||||
**Instructions:**
|
||||
* The \`summary\` must be an interpretation of the logs, focusing on key outcomes or activities. Prioritize recent events if logs are extensive.
|
||||
* The \`inferredStatus\` should reflect the most likely state *deduced purely from the log text provided*. Ensure it is one of the specified enum values.`;
|
||||
|
||||
const response = await this.ai.generateJson([{ role: "user", parts: [{ text: prompt }] }], schema);
|
||||
const response = await this.ai.generateJson(
|
||||
[{ role: 'user', parts: [{ text: prompt }] }],
|
||||
schema,
|
||||
);
|
||||
|
||||
// --- Enhanced Validation ---
|
||||
if (typeof response !== 'object' || response === null) {
|
||||
throw new Error(`LLM returned non-object response: ${JSON.stringify(response)}`);
|
||||
}
|
||||
if (typeof response.summary !== 'string' || response.summary.trim() === '') {
|
||||
// Ensure summary is a non-empty string
|
||||
console.warn("LLM response validation warning: 'summary' field is missing, empty or not a string. Raw response:", response);
|
||||
// Decide how to handle: throw error, or assign default? Let's throw for now.
|
||||
throw new Error(`LLM response missing or invalid 'summary'. Got: ${JSON.stringify(response.summary)}`);
|
||||
// --- Enhanced Validation ---
|
||||
if (typeof response !== 'object' || response === null) {
|
||||
throw new Error(
|
||||
`LLM returned non-object response: ${JSON.stringify(response)}`,
|
||||
);
|
||||
}
|
||||
if (
|
||||
typeof response.summary !== 'string' ||
|
||||
response.summary.trim() === ''
|
||||
) {
|
||||
// Ensure summary is a non-empty string
|
||||
console.warn(
|
||||
"LLM response validation warning: 'summary' field is missing, empty or not a string. Raw response:",
|
||||
response,
|
||||
);
|
||||
// Decide how to handle: throw error, or assign default? Let's throw for now.
|
||||
throw new Error(
|
||||
`LLM response missing or invalid 'summary'. Got: ${JSON.stringify(response.summary)}`,
|
||||
);
|
||||
}
|
||||
if (
|
||||
!['Running', 'SuccessReported', 'ErrorReported', 'Unknown'].includes(
|
||||
response.inferredStatus,
|
||||
)
|
||||
) {
|
||||
console.warn(
|
||||
`LLM response validation warning: 'inferredStatus' is invalid ('${response.inferredStatus}'). Raw response:`,
|
||||
response,
|
||||
);
|
||||
// Decide how to handle: throw error, or default to 'Unknown'? Let's throw.
|
||||
throw new Error(
|
||||
`LLM returned invalid 'inferredStatus': ${JSON.stringify(response.inferredStatus)}`,
|
||||
);
|
||||
}
|
||||
|
||||
}
|
||||
if (!['Running', 'SuccessReported', 'ErrorReported', 'Unknown'].includes(response.inferredStatus)) {
|
||||
console.warn(`LLM response validation warning: 'inferredStatus' is invalid ('${response.inferredStatus}'). Raw response:`, response);
|
||||
// Decide how to handle: throw error, or default to 'Unknown'? Let's throw.
|
||||
throw new Error(`LLM returned invalid 'inferredStatus': ${JSON.stringify(response.inferredStatus)}`);
|
||||
}
|
||||
|
||||
return response as AnalysisResult; // Cast after validation
|
||||
|
||||
} catch (error: any) {
|
||||
console.error(`LLM analysis call failed for command "${command}":`, error);
|
||||
// Ensure the error message passed back is helpful
|
||||
const errorMessage = error instanceof Error ? error.message : String(error);
|
||||
return {
|
||||
error: `LLM analysis call encountered an error: ${errorMessage}`,
|
||||
inferredStatus: 'AnalysisFailed'
|
||||
};
|
||||
}
|
||||
return response as AnalysisResult; // Cast after validation
|
||||
} catch (error: any) {
|
||||
console.error(
|
||||
`LLM analysis call failed for command "${command}":`,
|
||||
error,
|
||||
);
|
||||
// Ensure the error message passed back is helpful
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error);
|
||||
return {
|
||||
error: `LLM analysis call encountered an error: ${errorMessage}`,
|
||||
inferredStatus: 'AnalysisFailed',
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
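For orientation, a minimal usage sketch of the formatted class above. The import path, the way the command is launched, and the temp-file plumbing are assumptions for illustration; only the constructor options and the `analyze(pid, stdoutFile, stderrFile, command)` signature come from this diff. It also assumes the default `GeminiClient` can be constructed in your environment.

```typescript
// Illustrative sketch only — paths and launch plumbing are assumed.
import { spawn } from 'node:child_process';
import { mkdtemp, open } from 'node:fs/promises';
import { tmpdir } from 'node:os';
import path from 'node:path';
import { BackgroundTerminalAnalyzer } from './backgroundTerminalAnalyzer.js'; // assumed path

async function runInBackgroundAndAnalyze(command: string): Promise<void> {
  // Temp files that collect the background process output for polling.
  const dir = await mkdtemp(path.join(tmpdir(), 'bg-analysis-'));
  const stdoutFile = path.join(dir, 'stdout.log');
  const stderrFile = path.join(dir, 'stderr.log');
  const out = await open(stdoutFile, 'w');
  const err = await open(stderrFile, 'w');

  // Launch the command detached so it keeps running independently,
  // redirecting its stdout/stderr into the temp files.
  const child = spawn(command, {
    shell: true,
    detached: true,
    stdio: ['ignore', out.fd, err.fd],
  });
  child.unref();
  await out.close();
  await err.close();

  // Poll every 2s, up to 10 attempts, after a 1s warm-up delay.
  const analyzer = new BackgroundTerminalAnalyzer(undefined, {
    pollIntervalMs: 2000,
    maxAttempts: 10,
    initialDelayMs: 1000,
  });

  const outcome = await analyzer.analyze(child.pid!, stdoutFile, stderrFile, command);
  console.log(`[${outcome.status}] ${outcome.summary}`);
}

runInBackgroundAndAnalyze('npm run build').catch(console.error);
```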
|
||||
|
|
|
@ -18,11 +18,12 @@ interface FolderStructureOptions {
|
|||
}
|
||||
|
||||
// Define a type for the merged options where fileIncludePattern remains optional
|
||||
type MergedFolderStructureOptions = Required<Omit<FolderStructureOptions, 'fileIncludePattern'>> & {
|
||||
fileIncludePattern?: RegExp;
|
||||
type MergedFolderStructureOptions = Required<
|
||||
Omit<FolderStructureOptions, 'fileIncludePattern'>
|
||||
> & {
|
||||
fileIncludePattern?: RegExp;
|
||||
};
|
||||
|
||||
|
||||
/** Represents the full, unfiltered information about a folder and its contents. */
|
||||
interface FullFolderInfo {
|
||||
name: string;
|
||||
|
@ -55,7 +56,7 @@ interface ReducedFolderNode {
|
|||
*/
|
||||
async function readFullStructure(
|
||||
folderPath: string,
|
||||
options: MergedFolderStructureOptions
|
||||
options: MergedFolderStructureOptions,
|
||||
): Promise<FullFolderInfo | null> {
|
||||
const name = path.basename(folderPath);
|
||||
// Initialize with isIgnored: false
|
||||
|
@ -88,7 +89,7 @@ async function readFullStructure(
|
|||
files: [],
|
||||
subFolders: [],
|
||||
totalChildren: 0, // No children explored
|
||||
totalFiles: 0, // No files explored
|
||||
totalFiles: 0, // No files explored
|
||||
isIgnored: true, // Mark as ignored
|
||||
};
|
||||
folderInfo.subFolders.push(ignoredFolderInfo);
|
||||
|
@ -99,7 +100,12 @@ async function readFullStructure(
|
|||
// If not ignored, recurse as before
|
||||
const subFolderInfo = await readFullStructure(subFolderPath, options);
|
||||
// Add non-empty folders OR explicitly ignored folders
|
||||
if (subFolderInfo && (subFolderInfo.totalChildren > 0 || subFolderInfo.files.length > 0 || subFolderInfo.isIgnored)) {
|
||||
if (
|
||||
subFolderInfo &&
|
||||
(subFolderInfo.totalChildren > 0 ||
|
||||
subFolderInfo.files.length > 0 ||
|
||||
subFolderInfo.isIgnored)
|
||||
) {
|
||||
folderInfo.subFolders.push(subFolderInfo);
|
||||
}
|
||||
}
|
||||
|
@ -107,34 +113,43 @@ async function readFullStructure(
|
|||
|
||||
// Then process files (only if the current folder itself isn't marked as ignored)
|
||||
for (const entry of entries) {
|
||||
if (entry.isFile()) {
|
||||
const fileName = entry.name;
|
||||
// Include if no pattern or if pattern matches
|
||||
if (!options.fileIncludePattern || options.fileIncludePattern.test(fileName)) {
|
||||
folderInfo.files.push(fileName);
|
||||
}
|
||||
}
|
||||
if (entry.isFile()) {
|
||||
const fileName = entry.name;
|
||||
// Include if no pattern or if pattern matches
|
||||
if (
|
||||
!options.fileIncludePattern ||
|
||||
options.fileIncludePattern.test(fileName)
|
||||
) {
|
||||
folderInfo.files.push(fileName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate totals *after* processing children
|
||||
// Ignored folders contribute 0 to counts here because we didn't look inside.
|
||||
totalFileCount = folderInfo.files.length + folderInfo.subFolders.reduce((sum, sf) => sum + sf.totalFiles, 0);
|
||||
totalFileCount =
|
||||
folderInfo.files.length +
|
||||
folderInfo.subFolders.reduce((sum, sf) => sum + sf.totalFiles, 0);
|
||||
// Count the ignored folder itself as one child item in the parent's count.
|
||||
totalChildrenCount = folderInfo.files.length + folderInfo.subFolders.length + folderInfo.subFolders.reduce((sum, sf) => sum + sf.totalChildren, 0);
|
||||
|
||||
totalChildrenCount =
|
||||
folderInfo.files.length +
|
||||
folderInfo.subFolders.length +
|
||||
folderInfo.subFolders.reduce((sum, sf) => sum + sf.totalChildren, 0);
|
||||
} catch (error: any) {
|
||||
if (error.code === 'EACCES' || error.code === 'ENOENT') {
|
||||
console.warn(`Warning: Could not read directory ${folderPath}: ${error.message}`);
|
||||
console.warn(
|
||||
`Warning: Could not read directory ${folderPath}: ${error.message}`,
|
||||
);
|
||||
return null;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
return {
|
||||
...(folderInfo as FullFolderInfo), // Cast needed after conditional assignment check
|
||||
totalChildren: totalChildrenCount,
|
||||
totalFiles: totalFileCount,
|
||||
};
|
||||
return {
|
||||
...(folderInfo as FullFolderInfo), // Cast needed after conditional assignment check
|
||||
totalChildren: totalChildrenCount,
|
||||
totalFiles: totalFileCount,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -146,12 +161,20 @@ async function readFullStructure(
|
|||
* @returns The root node of the reduced structure.
|
||||
*/
|
||||
function reduceStructure(
|
||||
fullInfo: FullFolderInfo,
|
||||
maxItems: number,
|
||||
ignoredFolders: Set<string> // Pass ignoredFolders for checking
|
||||
fullInfo: FullFolderInfo,
|
||||
maxItems: number,
|
||||
ignoredFolders: Set<string>, // Pass ignoredFolders for checking
|
||||
): ReducedFolderNode {
|
||||
const rootReducedNode: ReducedFolderNode = { name: fullInfo.name, files: [], subFolders: [], isRoot: true };
|
||||
const queue: Array<{ original: FullFolderInfo; reduced: ReducedFolderNode }> = [];
|
||||
const rootReducedNode: ReducedFolderNode = {
|
||||
name: fullInfo.name,
|
||||
files: [],
|
||||
subFolders: [],
|
||||
isRoot: true,
|
||||
};
|
||||
const queue: Array<{
|
||||
original: FullFolderInfo;
|
||||
reduced: ReducedFolderNode;
|
||||
}> = [];
|
||||
|
||||
// Don't count the root itself towards the limit initially
|
||||
queue.push({ original: fullInfo, reduced: rootReducedNode });
|
||||
|
@ -160,20 +183,20 @@ function reduceStructure(
|
|||
while (queue.length > 0) {
|
||||
const { original: originalFolder, reduced: reducedFolder } = queue.shift()!;
|
||||
|
||||
// If the folder being processed was itself marked as ignored (shouldn't happen for root)
|
||||
if (originalFolder.isIgnored) {
|
||||
continue;
|
||||
}
|
||||
// If the folder being processed was itself marked as ignored (shouldn't happen for root)
|
||||
if (originalFolder.isIgnored) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Process Files
|
||||
let fileLimitReached = false;
|
||||
for (const file of originalFolder.files) {
|
||||
// Check limit *before* adding the file
|
||||
// Check limit *before* adding the file
|
||||
if (itemCount >= maxItems) {
|
||||
if (!fileLimitReached) {
|
||||
reducedFolder.files.push(TRUNCATION_INDICATOR);
|
||||
reducedFolder.hasMoreFiles = true;
|
||||
fileLimitReached = true;
|
||||
reducedFolder.files.push(TRUNCATION_INDICATOR);
|
||||
reducedFolder.hasMoreFiles = true;
|
||||
fileLimitReached = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -184,41 +207,44 @@ function reduceStructure(
|
|||
// Process Subfolders
|
||||
let subfolderLimitReached = false;
|
||||
for (const subFolder of originalFolder.subFolders) {
|
||||
// Count the folder itself towards the limit
|
||||
itemCount++;
|
||||
if (itemCount > maxItems) {
|
||||
if (!subfolderLimitReached) {
|
||||
// Add a placeholder node ONLY if we haven't already added one
|
||||
const truncatedSubfolderNode: ReducedFolderNode = {
|
||||
name: subFolder.name,
|
||||
files: [TRUNCATION_INDICATOR], // Generic truncation
|
||||
subFolders: [],
|
||||
hasMoreFiles: true,
|
||||
};
|
||||
reducedFolder.subFolders.push(truncatedSubfolderNode);
|
||||
reducedFolder.hasMoreSubfolders = true;
|
||||
subfolderLimitReached = true;
|
||||
}
|
||||
continue; // Stop processing further subfolders for this parent
|
||||
}
|
||||
|
||||
// Handle explicitly ignored folders identified during the read phase
|
||||
if (subFolder.isIgnored) {
|
||||
const ignoredReducedNode: ReducedFolderNode = {
|
||||
name: subFolder.name,
|
||||
files: [TRUNCATION_INDICATOR], // Indicate contents ignored/truncated
|
||||
subFolders: [],
|
||||
hasMoreFiles: true, // Mark as truncated
|
||||
// Count the folder itself towards the limit
|
||||
itemCount++;
|
||||
if (itemCount > maxItems) {
|
||||
if (!subfolderLimitReached) {
|
||||
// Add a placeholder node ONLY if we haven't already added one
|
||||
const truncatedSubfolderNode: ReducedFolderNode = {
|
||||
name: subFolder.name,
|
||||
files: [TRUNCATION_INDICATOR], // Generic truncation
|
||||
subFolders: [],
|
||||
hasMoreFiles: true,
|
||||
};
|
||||
reducedFolder.subFolders.push(ignoredReducedNode);
|
||||
// DO NOT add the ignored folder to the queue for further processing
|
||||
}
|
||||
else {
|
||||
// If not ignored and within limit, create the reduced node and add to queue
|
||||
const reducedSubFolder: ReducedFolderNode = { name: subFolder.name, files: [], subFolders: [] };
|
||||
reducedFolder.subFolders.push(reducedSubFolder);
|
||||
queue.push({ original: subFolder, reduced: reducedSubFolder });
|
||||
}
|
||||
reducedFolder.subFolders.push(truncatedSubfolderNode);
|
||||
reducedFolder.hasMoreSubfolders = true;
|
||||
subfolderLimitReached = true;
|
||||
}
|
||||
continue; // Stop processing further subfolders for this parent
|
||||
}
|
||||
|
||||
// Handle explicitly ignored folders identified during the read phase
|
||||
if (subFolder.isIgnored) {
|
||||
const ignoredReducedNode: ReducedFolderNode = {
|
||||
name: subFolder.name,
|
||||
files: [TRUNCATION_INDICATOR], // Indicate contents ignored/truncated
|
||||
subFolders: [],
|
||||
hasMoreFiles: true, // Mark as truncated
|
||||
};
|
||||
reducedFolder.subFolders.push(ignoredReducedNode);
|
||||
// DO NOT add the ignored folder to the queue for further processing
|
||||
} else {
|
||||
// If not ignored and within limit, create the reduced node and add to queue
|
||||
const reducedSubFolder: ReducedFolderNode = {
|
||||
name: subFolder.name,
|
||||
files: [],
|
||||
subFolders: [],
|
||||
};
|
||||
reducedFolder.subFolders.push(reducedSubFolder);
|
||||
queue.push({ original: subFolder, reduced: reducedSubFolder });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -227,25 +253,27 @@ function reduceStructure(
|
|||
|
||||
/** Calculates the total number of items present in the reduced structure. */
|
||||
function countReducedItems(node: ReducedFolderNode): number {
|
||||
let count = 0;
|
||||
// Count files, treating '...' as one item if present
|
||||
count += node.files.length;
|
||||
let count = 0;
|
||||
// Count files, treating '...' as one item if present
|
||||
count += node.files.length;
|
||||
|
||||
// Count subfolders and recursively count their contents
|
||||
count += node.subFolders.length;
|
||||
for (const sub of node.subFolders) {
|
||||
// Check if it's a placeholder ignored/truncated node
|
||||
const isTruncatedPlaceholder = (sub.files.length === 1 && sub.files[0] === TRUNCATION_INDICATOR && sub.subFolders.length === 0);
|
||||
// Count subfolders and recursively count their contents
|
||||
count += node.subFolders.length;
|
||||
for (const sub of node.subFolders) {
|
||||
// Check if it's a placeholder ignored/truncated node
|
||||
const isTruncatedPlaceholder =
|
||||
sub.files.length === 1 &&
|
||||
sub.files[0] === TRUNCATION_INDICATOR &&
|
||||
sub.subFolders.length === 0;
|
||||
|
||||
if (!isTruncatedPlaceholder) {
|
||||
count += countReducedItems(sub);
|
||||
}
|
||||
// Don't add count for items *inside* the placeholder node itself.
|
||||
if (!isTruncatedPlaceholder) {
|
||||
count += countReducedItems(sub);
|
||||
}
|
||||
return count;
|
||||
// Don't add count for items *inside* the placeholder node itself.
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Formats the reduced folder structure into a tree-like string.
|
||||
* (No changes needed in this function)
|
||||
|
@ -258,23 +286,23 @@ function formatReducedStructure(
|
|||
node: ReducedFolderNode,
|
||||
indent: string,
|
||||
isLast: boolean,
|
||||
builder: string[]
|
||||
builder: string[],
|
||||
): void {
|
||||
const connector = isLast ? "└───" : "├───";
|
||||
const connector = isLast ? '└───' : '├───';
|
||||
const linePrefix = indent + connector;
|
||||
|
||||
// Don't print the root node's name directly, only its contents
|
||||
if (!node.isRoot) {
|
||||
builder.push(`${linePrefix}${node.name}/`);
|
||||
builder.push(`${linePrefix}${node.name}/`);
|
||||
}
|
||||
|
||||
const childIndent = indent + (isLast || node.isRoot ? " " : "│ "); // Use " " if last, "│" otherwise
|
||||
const childIndent = indent + (isLast || node.isRoot ? ' ' : '│ '); // Use " " if last, "│" otherwise
|
||||
|
||||
// Render files
|
||||
const fileCount = node.files.length;
|
||||
for (let i = 0; i < fileCount; i++) {
|
||||
const isLastFile = i === fileCount - 1 && node.subFolders.length === 0;
|
||||
const fileConnector = isLastFile ? "└───" : "├───";
|
||||
const fileConnector = isLastFile ? '└───' : '├───';
|
||||
builder.push(`${childIndent}${fileConnector}${node.files[i]}`);
|
||||
}
|
||||
|
||||
|
@ -299,7 +327,7 @@ function formatReducedStructure(
|
|||
*/
|
||||
export async function getFolderStructure(
|
||||
directory: string,
|
||||
options?: FolderStructureOptions
|
||||
options?: FolderStructureOptions,
|
||||
): Promise<string> {
|
||||
const resolvedPath = path.resolve(directory);
|
||||
const mergedOptions: MergedFolderStructureOptions = {
|
||||
|
@ -317,31 +345,38 @@ export async function getFolderStructure(
|
|||
}
|
||||
|
||||
// 2. Reduce the structure (handles ignored folders specifically)
|
||||
const reducedRoot = reduceStructure(fullInfo, mergedOptions.maxItems, mergedOptions.ignoredFolders);
|
||||
const reducedRoot = reduceStructure(
|
||||
fullInfo,
|
||||
mergedOptions.maxItems,
|
||||
mergedOptions.ignoredFolders,
|
||||
);
|
||||
|
||||
// 3. Count items in the *reduced* structure for the summary
|
||||
const rootNodeItselfCount = 0; // Don't count the root node in the items summary
|
||||
const reducedItemCount = countReducedItems(reducedRoot) - rootNodeItselfCount;
|
||||
|
||||
const reducedItemCount =
|
||||
countReducedItems(reducedRoot) - rootNodeItselfCount;
|
||||
|
||||
// 4. Format the reduced structure into a string
|
||||
const structureLines: string[] = [];
|
||||
formatReducedStructure(reducedRoot, "", true, structureLines);
|
||||
formatReducedStructure(reducedRoot, '', true, structureLines);
|
||||
|
||||
// 5. Build the final output string
|
||||
const displayPath = resolvedPath.replace(/\\/g, '/');
|
||||
const totalOriginalChildren = fullInfo.totalChildren;
|
||||
|
||||
let disclaimer = "";
|
||||
// Check if any truncation happened OR if ignored folders were present
|
||||
if (reducedItemCount < totalOriginalChildren || fullInfo.subFolders.some(sf => sf.isIgnored)) {
|
||||
disclaimer = `Folders or files indicated with ${TRUNCATION_INDICATOR} contain more items not shown or were ignored.`;
|
||||
let disclaimer = '';
|
||||
// Check if any truncation happened OR if ignored folders were present
|
||||
if (
|
||||
reducedItemCount < totalOriginalChildren ||
|
||||
fullInfo.subFolders.some((sf) => sf.isIgnored)
|
||||
) {
|
||||
disclaimer = `Folders or files indicated with ${TRUNCATION_INDICATOR} contain more items not shown or were ignored.`;
|
||||
}
|
||||
|
||||
const summary = `Showing ${reducedItemCount} of ${totalOriginalChildren} items (files + folders). ${disclaimer}`.trim();
|
||||
const summary =
|
||||
`Showing ${reducedItemCount} of ${totalOriginalChildren} items (files + folders). ${disclaimer}`.trim();
|
||||
|
||||
return `${summary}\n\n${displayPath}/\n${structureLines.join('\n')}`;
|
||||
|
||||
} catch (error: any) {
|
||||
console.error(`Error getting folder structure for ${resolvedPath}:`, error);
|
||||
return `Error processing directory "${resolvedPath}": ${error.message}`;
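A small usage sketch of the exported helper. The module path is assumed, and the option names are inferred from the `FolderStructureOptions` fields referenced in the diff above (`maxItems`, `ignoredFolders`, `fileIncludePattern`).

```typescript
import { getFolderStructure } from './getFolderStructure.js'; // assumed module path

async function printProjectTree(): Promise<void> {
  const tree = await getFolderStructure(process.cwd(), {
    maxItems: 200, // cap on files + folders rendered before truncation
    ignoredFolders: new Set(['node_modules', 'dist', '.git']),
    fileIncludePattern: /\.(ts|tsx|json)$/, // only list source/config files
  });
  // Prints the "Showing X of Y items ..." summary followed by the ASCII tree.
  console.log(tree);
}

printProjectTree().catch(console.error);
```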
|
||||
|
|
|
@ -5,7 +5,7 @@ import path from 'node:path'; // Import the 'path' module
|
|||
* Returns the target directory, using the provided argument or the current working directory.
|
||||
*/
|
||||
export function getTargetDirectory(targetDirArg: string | undefined): string {
|
||||
return targetDirArg || process.cwd();
|
||||
return targetDirArg || process.cwd();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -13,73 +13,72 @@ export function getTargetDirectory(targetDirArg: string | undefined): string {
|
|||
* Example: /path/to/a/very/long/file.txt -> /path/.../long/file.txt
|
||||
*/
|
||||
export function shortenPath(filePath: string, maxLen: number = 35): string {
|
||||
if (filePath.length <= maxLen) {
|
||||
return filePath;
|
||||
if (filePath.length <= maxLen) {
|
||||
return filePath;
|
||||
}
|
||||
|
||||
const parsedPath = path.parse(filePath);
|
||||
const root = parsedPath.root;
|
||||
const separator = path.sep;
|
||||
|
||||
// Get segments of the path *after* the root
|
||||
const relativePath = filePath.substring(root.length);
|
||||
const segments = relativePath.split(separator).filter((s) => s !== ''); // Filter out empty segments
|
||||
|
||||
// Handle cases with no segments after root (e.g., "/", "C:\") or only one segment
|
||||
if (segments.length <= 1) {
|
||||
// Fallback to simple start/end truncation for very short paths or single segments
|
||||
const keepLen = Math.floor((maxLen - 3) / 2);
|
||||
// Ensure keepLen is not negative if maxLen is very small
|
||||
if (keepLen <= 0) {
|
||||
return filePath.substring(0, maxLen - 3) + '...';
|
||||
}
|
||||
const start = filePath.substring(0, keepLen);
|
||||
const end = filePath.substring(filePath.length - keepLen);
|
||||
return `${start}...${end}`;
|
||||
}
|
||||
|
||||
const parsedPath = path.parse(filePath);
|
||||
const root = parsedPath.root;
|
||||
const separator = path.sep;
|
||||
const firstDir = segments[0];
|
||||
const startComponent = root + firstDir;
|
||||
|
||||
// Get segments of the path *after* the root
|
||||
const relativePath = filePath.substring(root.length);
|
||||
const segments = relativePath.split(separator).filter(s => s !== ''); // Filter out empty segments
|
||||
const endPartSegments: string[] = [];
|
||||
// Base length: startComponent + separator + "..."
|
||||
let currentLength = startComponent.length + separator.length + 3;
|
||||
|
||||
// Handle cases with no segments after root (e.g., "/", "C:\") or only one segment
|
||||
if (segments.length <= 1) {
|
||||
// Fallback to simple start/end truncation for very short paths or single segments
|
||||
const keepLen = Math.floor((maxLen - 3) / 2);
|
||||
// Ensure keepLen is not negative if maxLen is very small
|
||||
if (keepLen <= 0) {
|
||||
return filePath.substring(0, maxLen - 3) + '...';
|
||||
}
|
||||
const start = filePath.substring(0, keepLen);
|
||||
const end = filePath.substring(filePath.length - keepLen);
|
||||
return `${start}...${end}`;
|
||||
// Iterate backwards through segments (excluding the first one)
|
||||
for (let i = segments.length - 1; i >= 1; i--) {
|
||||
const segment = segments[i];
|
||||
// Length needed if we add this segment: current + separator + segment
|
||||
const lengthWithSegment = currentLength + separator.length + segment.length;
|
||||
|
||||
if (lengthWithSegment <= maxLen) {
|
||||
endPartSegments.unshift(segment); // Add to the beginning of the end part
|
||||
currentLength = lengthWithSegment;
|
||||
} else {
|
||||
// Adding this segment would exceed maxLen
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
const firstDir = segments[0];
|
||||
const startComponent = root + firstDir;
|
||||
// Construct the final path
|
||||
let result = startComponent + separator + '...';
|
||||
if (endPartSegments.length > 0) {
|
||||
result += separator + endPartSegments.join(separator);
|
||||
}
|
||||
|
||||
const endPartSegments: string[] = [];
|
||||
// Base length: startComponent + separator + "..."
|
||||
let currentLength = startComponent.length + separator.length + 3;
|
||||
|
||||
// Iterate backwards through segments (excluding the first one)
|
||||
for (let i = segments.length - 1; i >= 1; i--) {
|
||||
const segment = segments[i];
|
||||
// Length needed if we add this segment: current + separator + segment
|
||||
const lengthWithSegment = currentLength + separator.length + segment.length;
|
||||
|
||||
if (lengthWithSegment <= maxLen) {
|
||||
endPartSegments.unshift(segment); // Add to the beginning of the end part
|
||||
currentLength = lengthWithSegment;
|
||||
} else {
|
||||
// Adding this segment would exceed maxLen
|
||||
break;
|
||||
}
|
||||
// As a final check, if the result is somehow still too long (e.g., startComponent + ... is too long)
|
||||
// fallback to simple truncation of the original path
|
||||
if (result.length > maxLen) {
|
||||
const keepLen = Math.floor((maxLen - 3) / 2);
|
||||
if (keepLen <= 0) {
|
||||
return filePath.substring(0, maxLen - 3) + '...';
|
||||
}
|
||||
const start = filePath.substring(0, keepLen);
|
||||
const end = filePath.substring(filePath.length - keepLen);
|
||||
return `${start}...${end}`;
|
||||
}
|
||||
|
||||
// Construct the final path
|
||||
let result = startComponent + separator + '...';
|
||||
if (endPartSegments.length > 0) {
|
||||
result += separator + endPartSegments.join(separator);
|
||||
}
|
||||
|
||||
// As a final check, if the result is somehow still too long (e.g., startComponent + ... is too long)
|
||||
// fallback to simple truncation of the original path
|
||||
if (result.length > maxLen) {
|
||||
const keepLen = Math.floor((maxLen - 3) / 2);
|
||||
if (keepLen <= 0) {
|
||||
return filePath.substring(0, maxLen - 3) + '...';
|
||||
}
|
||||
const start = filePath.substring(0, keepLen);
|
||||
const end = filePath.substring(filePath.length - keepLen);
|
||||
return `${start}...${end}`;
|
||||
}
|
||||
|
||||
|
||||
return result;
|
||||
return result;
|
||||
}
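Two quick examples of the truncation behaviour implemented above, assuming POSIX separators; the module path is made up for illustration.

```typescript
import { shortenPath } from './paths.js'; // assumed module path

// Paths at or under maxLen are returned unchanged.
shortenPath('/home/user/notes.txt', 35); // -> '/home/user/notes.txt'

// Longer paths keep the root and first directory, replace the middle with '...',
// and keep as many trailing segments as still fit within maxLen.
shortenPath('/home/user/projects/gemini/src/utils/folderStructure.ts', 35);
// -> '/home/.../utils/folderStructure.ts' (34 chars)
```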
|
||||
|
||||
/**
|
||||
|
@ -91,12 +90,15 @@ export function shortenPath(filePath: string, maxLen: number = 35): string {
|
|||
* @param rootDirectory The absolute path of the directory to make the target path relative to.
|
||||
* @returns The relative path from rootDirectory to targetPath.
|
||||
*/
|
||||
export function makeRelative(targetPath: string, rootDirectory: string): string {
|
||||
const resolvedTargetPath = path.resolve(targetPath);
|
||||
const resolvedRootDirectory = path.resolve(rootDirectory);
|
||||
export function makeRelative(
|
||||
targetPath: string,
|
||||
rootDirectory: string,
|
||||
): string {
|
||||
const resolvedTargetPath = path.resolve(targetPath);
|
||||
const resolvedRootDirectory = path.resolve(rootDirectory);
|
||||
|
||||
const relativePath = path.relative(resolvedRootDirectory, resolvedTargetPath);
|
||||
const relativePath = path.relative(resolvedRootDirectory, resolvedTargetPath);
|
||||
|
||||
// If the paths are the same, path.relative returns '', return '.' instead
|
||||
return relativePath || '.';
|
||||
// If the paths are the same, path.relative returns '', return '.' instead
|
||||
return relativePath || '.';
|
||||
}
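And a sketch for the two path helpers above; the paths and module location are illustrative, with POSIX separators assumed.

```typescript
import { getTargetDirectory, makeRelative } from './paths.js'; // assumed module path

// Falls back to the current working directory when no CLI argument is given.
const targetDir = getTargetDirectory(process.argv[2]);

// A path inside the root becomes a plain relative path...
makeRelative('/repo/src/gemini.ts', '/repo'); // -> 'src/gemini.ts'

// ...and the root itself maps to '.', never an empty string.
makeRelative(targetDir, targetDir); // -> '.'
```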
|
||||
|
|
|
@ -12,12 +12,12 @@ export class SchemaValidator {
|
|||
static validate(schema: Record<string, unknown>, data: unknown): boolean {
|
||||
// This is a simplified implementation
|
||||
// In a real application, you would use a library like Ajv for proper validation
|
||||
|
||||
|
||||
// Check for required fields
|
||||
if (schema.required && Array.isArray(schema.required)) {
|
||||
const required = schema.required as string[];
|
||||
const dataObj = data as Record<string, unknown>;
|
||||
|
||||
|
||||
for (const field of required) {
|
||||
if (dataObj[field] === undefined) {
|
||||
console.error(`Missing required field: ${field}`);
|
||||
|
@ -25,25 +25,29 @@ export class SchemaValidator {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Check property types if properties are defined
|
||||
if (schema.properties && typeof schema.properties === 'object') {
|
||||
const properties = schema.properties as Record<string, { type?: string }>;
|
||||
const dataObj = data as Record<string, unknown>;
|
||||
|
||||
|
||||
for (const [key, prop] of Object.entries(properties)) {
|
||||
if (dataObj[key] !== undefined && prop.type) {
|
||||
const expectedType = prop.type;
|
||||
const actualType = Array.isArray(dataObj[key]) ? 'array' : typeof dataObj[key];
|
||||
|
||||
const actualType = Array.isArray(dataObj[key])
|
||||
? 'array'
|
||||
: typeof dataObj[key];
|
||||
|
||||
if (expectedType !== actualType) {
|
||||
console.error(`Type mismatch for property "${key}": expected ${expectedType}, got ${actualType}`);
|
||||
console.error(
|
||||
`Type mismatch for property "${key}": expected ${expectedType}, got ${actualType}`,
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
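A usage sketch for the simplified validator above. The module path is assumed, and the second example assumes the elided branch returns `false` when a required field is missing (only the `console.error` call is visible in this hunk).

```typescript
import { SchemaValidator } from './schemaValidator.js'; // assumed module path

const schema = {
  required: ['summary', 'inferredStatus'],
  properties: {
    summary: { type: 'string' },
    inferredStatus: { type: 'string' },
  },
};

// Both required fields are present and have the declared types -> true.
SchemaValidator.validate(schema, {
  summary: 'Build finished without errors.',
  inferredStatus: 'SuccessReported',
}); // -> true

// 'inferredStatus' is missing -> "Missing required field" is logged.
SchemaValidator.validate(schema, { summary: 'Build finished.' }); // -> false (assumed)
```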
|
||||
|
|
|
@ -4,19 +4,10 @@
|
|||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"jsx": "react",
|
||||
"lib": [
|
||||
"DOM",
|
||||
"DOM.Iterable",
|
||||
"ES2020"
|
||||
],
|
||||
"lib": ["DOM", "DOM.Iterable", "ES2020"],
|
||||
"module": "Node16",
|
||||
"target": "ES2020",
|
||||
"target": "ES2020"
|
||||
},
|
||||
"exclude": [
|
||||
"node_modules",
|
||||
"dist"
|
||||
],
|
||||
"include": [
|
||||
"src"
|
||||
]
|
||||
}
|
||||
"exclude": ["node_modules", "dist"],
|
||||
"include": ["src"]
|
||||
}