Initial commit of Gemini Code CLI

This commit introduces the initial codebase for the Gemini Code CLI, a command-line interface designed to facilitate interaction with the Gemini API for software engineering tasks.

The code was migrated from a previous git repository as a single squashed commit.

Core Features & Components:

*   **Gemini Integration:** Leverages the `@google/genai` SDK to interact with the Gemini models, supporting chat history, streaming responses, and function calling (tools).
*   **Terminal UI:** Built with Ink (React for CLIs) providing an interactive chat interface within the terminal, including input prompts, message display, loading indicators, and tool interaction elements.
*   **Tooling Framework:** Implements a robust tool system allowing Gemini to interact with the local environment. Includes tools for:
    *   File system listing (`ls`)
    *   File reading (`read-file`)
    *   Content searching (`grep`)
    *   File globbing (`glob`)
    *   File editing (`edit`)
    *   File writing (`write-file`)
    *   Executing bash commands (`terminal`)
*   **State Management:** Handles the streaming state of Gemini responses and manages the conversation history.
*   **Configuration:** Parses command-line arguments (`yargs`) and loads environment variables (`dotenv`) for setup.
*   **Project Structure:** Organized into `core`, `ui`, `tools`, `config`, and `utils` directories using TypeScript. Includes basic build (`tsc`) and start scripts.

This initial version establishes the foundation for a powerful CLI tool enabling developers to use Gemini for coding assistance directly in their terminal environment.

---
Created by yours truly: __Gemini Code__
This commit is contained in:
Taylor Mullen 2025-04-15 21:41:08 -07:00
commit add233c504
54 changed files with 7920 additions and 0 deletions

17
.gitignore vendored Normal file
View File

@ -0,0 +1,17 @@
# API keys and secrets
.env
# Dependency directory
node_modules
bower_components
# Editors
.idea
*.iml
# OS metadata
.DS_Store
Thumbs.db
# Ignore built ts files
dist

29
.vscode/launch.json vendored Normal file
View File

@ -0,0 +1,29 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Attach",
"port": 9229,
"request": "attach",
"skipFiles": [
"<node_internals>/**"
],
"type": "node"
},
{
"type": "node",
"request": "launch",
"name": "Launch Program",
"skipFiles": [
"<node_internals>/**"
],
"program": "${file}",
"outFiles": [
"${workspaceFolder}/**/*.js"
]
}
]
}

3
README.md Normal file
View File

@ -0,0 +1,3 @@
# Gemini Code
TBD

1669
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

13
package.json Normal file
View File

@ -0,0 +1,13 @@
{
"name": "gemini-code",
"version": "1.0.0",
"private": true,
"workspaces": [
"packages/*"
],
"scripts": {
"build": "npm run build --workspaces",
"test": "npm run test --workspaces",
"start": "npm run start --workspace=gemini-code-cli"
}
}

38
packages/cli/package.json Normal file
View File

@ -0,0 +1,38 @@
{
"name": "gemini-code-cli",
"version": "1.0.0",
"description": "Gemini Code CLI",
"type": "module",
"main": "dist/gemini.js",
"scripts": {
"build": "tsc",
"start": "node dist/gemini.js",
"debug": "node --inspect-brk dist/gemini.js"
},
"files": [
"dist"
],
"dependencies": {
"@google/genai": "^0.8.0",
"diff": "^7.0.0",
"dotenv": "^16.4.7",
"fast-glob": "^3.3.3",
"ink": "^5.2.0",
"ink-select-input": "^6.0.0",
"ink-spinner": "^5.0.0",
"ink-text-input": "^6.0.0",
"react": "^18.3.1",
"yargs": "^17.7.2"
},
"devDependencies": {
"@types/diff": "^7.0.2",
"@types/dotenv": "^6.1.1",
"@types/node": "^20.11.24",
"@types/react": "^19.1.0",
"@types/yargs": "^17.0.32",
"typescript": "^5.3.3"
},
"engines": {
"node": ">=18"
}
}

View File

@ -0,0 +1,34 @@
import yargs from 'yargs/yargs';
import { hideBin } from 'yargs/helpers';
/**
 * Shape of the parsed command-line arguments returned by {@link parseArguments}.
 */
export interface CliArgs {
  // Optional working directory passed via --target_dir / -d; undefined when not given.
  target_dir: string | undefined;
  _: (string | number)[]; // Captures positional arguments
  // Add other expected args here if needed
  // e.g., verbose?: boolean;
}
/**
 * Parses process.argv with yargs.
 *
 * Unknown options are rejected (strict mode); stray positional arguments are
 * accepted but a warning is printed since the CLI ignores them.
 *
 * @returns the parsed arguments, shaped as {@link CliArgs}.
 */
export async function parseArguments(): Promise<CliArgs> {
  const parsed = await yargs(hideBin(process.argv))
    .option('target_dir', {
      type: 'string',
      alias: 'd',
      description:
        'The target directory for Gemini operations. Defaults to the current working directory.',
    })
    .help()
    .alias('h', 'help')
    .strict() // Error on unknown options rather than silently ignoring them.
    .parseAsync();

  // Positional arguments are tolerated but unused — warn so the user knows.
  const positionals = parsed._;
  if (positionals && positionals.length > 0) {
    console.warn(
      `Warning: Additional arguments provided (${positionals.join(', ')}), but will be ignored.`
    );
  }

  // The yargs result is structurally compatible with CliArgs; go through
  // `unknown` because the generic yargs type does not match exactly.
  return parsed as unknown as CliArgs;
}

View File

@ -0,0 +1,46 @@
import * as dotenv from 'dotenv';
import * as fs from 'node:fs';
import * as path from 'node:path';
import process from 'node:process';
/**
 * Walks upward from `startDir` looking for a `.env` file.
 *
 * @param startDir directory to begin the search from (made absolute first).
 * @returns the absolute path of the first `.env` found, or null when the
 *   filesystem root is reached without a match.
 */
function findEnvFile(startDir: string): string | null {
  let dir = path.resolve(startDir); // normalize to an absolute path
  for (;;) {
    const candidate = path.join(dir, '.env');
    if (fs.existsSync(candidate)) {
      return candidate;
    }
    const parent = path.dirname(dir);
    // path.dirname of a root returns the root itself — that is the stop condition.
    if (!parent || parent === dir) {
      return null;
    }
    dir = parent;
  }
}
/**
 * Loads the nearest `.env` file (searching upward from the current working
 * directory) into process.env via dotenv.
 *
 * If a `.env` file is found but does not provide GEMINI_API_KEY, the process
 * exits with status 1. If no `.env` file exists, this is a silent no-op.
 */
export function loadEnvironment(): void {
  const envFile = findEnvFile(process.cwd());
  if (envFile) {
    dotenv.config({ path: envFile });
    if (process.env.GEMINI_API_KEY) {
      return;
    }
    // A .env was loaded but the required key is absent — treat as fatal.
    console.error('Error: GEMINI_API_KEY environment variable is not set in the loaded .env file.');
    process.exit(1);
  }
}
/**
 * Returns the Gemini API key, loading the environment first.
 *
 * @throws Error when GEMINI_API_KEY is still unset after loading.
 */
export function getApiKey(): string {
  loadEnvironment();
  const key = process.env.GEMINI_API_KEY;
  if (key) {
    return key;
  }
  throw new Error('GEMINI_API_KEY is missing. Ensure loadEnvironment() was called successfully.');
}

View File

@ -0,0 +1,383 @@
import {
GenerateContentConfig, GoogleGenAI, Part, Chat,
Type,
SchemaUnion,
PartListUnion,
Content
} from '@google/genai';
import { getApiKey } from '../config/env.js';
import { CoreSystemPrompt } from './prompts.js';
import { type ToolCallEvent, type ToolCallConfirmationDetails, ToolCallStatus } from '../ui/types.js';
import process from 'node:process';
import { toolRegistry } from '../tools/tool-registry.js';
import { ToolResult } from '../tools/ToolResult.js';
import { getFolderStructure } from '../utils/getFolderStructure.js';
import { GeminiEventType, GeminiStream } from './GeminiStream.js';
// Outcome of attempting one model-requested tool call. Alongside the
// identifying call metadata, at most one of `result` (success), `error`
// (failure), or `confirmationDetails` (user approval required) is expected
// to be populated.
type ToolExecutionOutcome = {
  callId: string;
  name: string;
  args: Record<string, any>;
  result?: ToolResult;
  error?: any;
  confirmationDetails?: ToolCallConfirmationDetails;
};
/**
 * Wrapper around the @google/genai SDK. Manages chat sessions seeded with
 * environmental context, streams model responses, dispatches tool calls the
 * model requests (feeding results back in a multi-turn loop), and generates
 * schema-constrained JSON.
 */
export class GeminiClient {
  private ai: GoogleGenAI;
  // Deterministic generation defaults applied to every request.
  private defaultHyperParameters: GenerateContentConfig = {
    temperature: 0,
    topP: 1,
  };
  // Hard cap on model<->tool round-trips within one sendMessageStream call.
  private readonly MAX_TURNS = 100;

  constructor() {
    const apiKey = getApiKey();
    this.ai = new GoogleGenAI({ apiKey });
  }

  /**
   * Creates a chat session whose history is pre-seeded with a user message
   * carrying environment context (date, platform, cwd, folder structure) and
   * a canned model acknowledgement so the history stays user/model balanced.
   *
   * @throws Error when the SDK fails to create the chat session.
   */
  public async startChat(): Promise<Chat> {
    const tools = toolRegistry.getToolSchemas();
    // --- Get environmental information ---
    const cwd = process.cwd();
    const today = new Date().toLocaleDateString(undefined, { // Use locale-aware date formatting
      weekday: 'long', year: 'numeric', month: 'long', day: 'numeric'
    });
    const platform = process.platform;
    // --- Format information into a conversational multi-line string ---
    const folderStructure = await getFolderStructure(cwd);
    // --- End folder structure formatting ---)
    const initialContextText = `
Okay, just setting up the context for our chat.
Today is ${today}.
My operating system is: ${platform}
I'm currently working in the directory: ${cwd}
${folderStructure}
`.trim();
    const initialContextPart: Part = { text: initialContextText };
    // --- End environmental information formatting ---
    try {
      const chat = this.ai.chats.create({
        model: 'gemini-2.5-pro-preview-03-25',//'gemini-2.0-flash',
        config: {
          systemInstruction: CoreSystemPrompt,
          ...this.defaultHyperParameters,
          tools,
        },
        history: [
          // --- Add the context as a single part in the initial user message ---
          {
            role: "user",
            parts: [initialContextPart] // Pass the single Part object in an array
          },
          // --- Add an empty model response to balance the history ---
          {
            role: "model",
            parts: [{ text: "Got it. Thanks for the context!" }] // A slightly more conversational model response
          }
          // --- End history modification ---
        ],
      });
      return chat;
    } catch (error) {
      console.error("Error initializing Gemini chat session:", error);
      const message = error instanceof Error ? error.message : "Unknown error.";
      throw new Error(`Failed to initialize chat: ${message}`);
    }
  }

  /**
   * Appends a message to the chat's history array (mutated in place).
   * NOTE(review): the trailing `this.ai.chats` and `chat` expression
   * statements are no-op expressions — presumably leftovers from an earlier
   * edit; confirm and remove.
   */
  public addMessageToHistory(chat: Chat, message: Content): void {
    const history = chat.getHistory();
    history.push(message);
    this.ai.chats
    chat
  }

  /**
   * Sends a message and streams back UI events. Runs a multi-turn loop (up
   * to MAX_TURNS): tool calls requested by the model are executed (or routed
   * to user confirmation) and their results are fed back as function
   * responses; after a plain-text turn, a "who speaks next" JSON check
   * decides whether the model continues or the turn ends.
   *
   * Yields Content events (text chunks) and ToolCallInfo events (tool
   * lifecycle). Rethrows AbortError when `signal` is triggered.
   */
  public async* sendMessageStream(
    chat: Chat,
    request: PartListUnion,
    signal?: AbortSignal
  ): GeminiStream {
    let currentMessageToSend: PartListUnion = request;
    let turns = 0;
    try {
      while (turns < this.MAX_TURNS) {
        turns++;
        const resultStream = await chat.sendMessageStream({ message: currentMessageToSend });
        let functionResponseParts: Part[] = [];
        let pendingToolCalls: Array<{ callId: string; name: string; args: Record<string, any> }> = [];
        let yieldedTextInTurn = false;
        const chunksForDebug = [];
        for await (const chunk of resultStream) {
          chunksForDebug.push(chunk);
          if (signal?.aborted) {
            const abortError = new Error("Request cancelled by user during stream.");
            abortError.name = 'AbortError';
            throw abortError;
          }
          const functionCalls = chunk.functionCalls;
          if (functionCalls && functionCalls.length > 0) {
            for (const call of functionCalls) {
              // Synthesize a unique call id when the API does not supply one.
              const callId = call.id ?? `${call.name}-${Date.now()}-${Math.random().toString(16).slice(2)}`;
              const name = call.name || 'undefined_tool_name';
              const args = (call.args || {}) as Record<string, any>;
              pendingToolCalls.push({ callId, name, args });
              // Surface the pending call to the UI immediately.
              const evtValue: ToolCallEvent = {
                type: 'tool_call',
                status: ToolCallStatus.Pending,
                callId,
                name,
                args,
                resultDisplay: undefined,
                confirmationDetails: undefined,
              }
              yield {
                type: GeminiEventType.ToolCallInfo,
                value: evtValue,
              };
            }
          } else {
            const text = chunk.text;
            if (text) {
              yieldedTextInTurn = true;
              yield {
                type: GeminiEventType.Content,
                value: text,
              };
            }
          }
        }
        if (pendingToolCalls.length > 0) {
          // Execute all requested tools in parallel; each settles to an
          // outcome (result, error, or confirmation request) — never rejects.
          const toolPromises: Promise<ToolExecutionOutcome>[] = pendingToolCalls.map(async pendingToolCall => {
            const tool = toolRegistry.getTool(pendingToolCall.name);
            if (!tool) {
              // Directly return error outcome if tool not found
              return { ...pendingToolCall, error: new Error(`Tool "${pendingToolCall.name}" not found or is not registered.`) };
            }
            try {
              const confirmation = await tool.shouldConfirmExecute(pendingToolCall.args);
              if (confirmation) {
                return { ...pendingToolCall, confirmationDetails: confirmation };
              }
            } catch (error) {
              return { ...pendingToolCall, error: new Error(`Tool failed to check tool confirmation: ${error}`) };
            }
            try {
              const result = await tool.execute(pendingToolCall.args);
              return { ...pendingToolCall, result };
            } catch (error) {
              return { ...pendingToolCall, error: new Error(`Tool failed to execute: ${error}`) };
            }
          });
          const toolExecutionOutcomes: ToolExecutionOutcome[] = await Promise.all(toolPromises);
          // Report each outcome to the UI as text (for errors) or a tool event.
          for (const executedTool of toolExecutionOutcomes) {
            const { callId, name, args, result, error, confirmationDetails } = executedTool;
            if (error) {
              const errorMessage = error?.message || String(error);
              yield {
                type: GeminiEventType.Content,
                value: `[Error invoking tool ${name}: ${errorMessage}]`,
              };
            } else if (result && typeof result === 'object' && result !== null && 'error' in result) {
              const errorMessage = String(result.error);
              yield {
                type: GeminiEventType.Content,
                value: `[Error executing tool ${name}: ${errorMessage}]`,
              };
            } else {
              const status = confirmationDetails ? ToolCallStatus.Confirming : ToolCallStatus.Invoked;
              const evtValue: ToolCallEvent = { type: 'tool_call', status, callId, name, args, resultDisplay: result?.returnDisplay, confirmationDetails }
              yield {
                type: GeminiEventType.ToolCallInfo,
                value: evtValue,
              };
            }
          }
          pendingToolCalls = [];
          const waitingOnConfirmations = toolExecutionOutcomes.filter(outcome => outcome.confirmationDetails).length > 0;
          if (waitingOnConfirmations) {
            // Stop processing content, wait for user.
            // TODO: Kill token processing once API supports signals.
            break;
          }
          // Package tool outcomes as functionResponse parts for the next turn.
          functionResponseParts = toolExecutionOutcomes.map((executedTool: ToolExecutionOutcome): Part => {
            const { name, result, error } = executedTool;
            const output = { "output": result?.llmContent };
            let toolOutcomePayload: any;
            if (error) {
              const errorMessage = error?.message || String(error);
              toolOutcomePayload = { error: `Invocation failed: ${errorMessage}` };
              console.error(`[Turn ${turns}] Critical error invoking tool ${name}:`, error);
            } else if (result && typeof result === 'object' && result !== null && 'error' in result) {
              // Tool-level error structures are still forwarded as output.
              toolOutcomePayload = output;
              console.warn(`[Turn ${turns}] Tool ${name} returned an error structure:`, result.error);
            } else {
              toolOutcomePayload = output;
            }
            return {
              functionResponse: {
                name: name,
                id: executedTool.callId,
                response: toolOutcomePayload,
              },
            };
          });
          currentMessageToSend = functionResponseParts;
        } else if (yieldedTextInTurn) {
          // Plain-text turn: ask the model (via a structured JSON check)
          // whether it should keep talking or yield the floor to the user.
          const history = chat.getHistory();
          const checkPrompt = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
**Decision Rules (apply in order):**
1. **Model Continues:** If your last response explicitly states an immediate next action *you* intend to take (e.g., "Next, I will...", "Now I'll process...", "Moving on to analyze...", indicates an intended tool call that didn't execute), OR if the response seems clearly incomplete (cut off mid-thought without a natural conclusion), then the **'model'** should speak next.
2. **Question to User:** If your last response ends with a direct question specifically addressed *to the user*, then the **'user'** should speak next.
3. **Waiting for User:** If your last response completed a thought, statement, or task *and* does not meet the criteria for Rule 1 (Model Continues) or Rule 2 (Question to User), it implies a pause expecting user input or reaction. In this case, the **'user'** should speak next.
**Output Format:**
Respond *only* in JSON format according to the following schema. Do not include any text outside the JSON structure.
\`\`\`json
{
"type": "object",
"properties": {
"reasoning": {
"type": "string",
"description": "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
},
"next_speaker": {
"type": "string",
"enum": ["user", "model"],
"description": "Who should speak next based *only* on the preceding turn and the decision rules."
}
},
"required": ["next_speaker", "reasoning"]
\`\`\`
}`;
          // Schema Idea
          const responseSchema: SchemaUnion = {
            type: Type.OBJECT,
            properties: {
              reasoning: {
                type: Type.STRING,
                description: "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
              },
              next_speaker: {
                type: Type.STRING,
                enum: ['user', 'model'], // Enforce the choices
                description: "Who should speak next based *only* on the preceding turn and the decision rules",
              },
            },
            required: ['reasoning', 'next_speaker']
          };
          try {
            // Use the new generateJson method, passing the history and the check prompt
            const parsedResponse = await this.generateJson([...history, { role: "user", parts: [{ text: checkPrompt }] }], responseSchema);
            // Safely extract the next speaker value
            const nextSpeaker: string | undefined = typeof parsedResponse?.next_speaker === 'string' ? parsedResponse.next_speaker : undefined;
            if (nextSpeaker === 'model') {
              currentMessageToSend = { text: 'alright' }; // Or potentially a more meaningful continuation prompt
            } else {
              // 'user' should speak next, or value is missing/invalid. End the turn.
              break;
            }
          } catch (error) {
            console.error(`[Turn ${turns}] Failed to get or parse next speaker check:`, error);
            // If the check fails, assume user should speak next to avoid infinite loops
            break;
          }
        } else {
          console.warn(`[Turn ${turns}] No text or function calls received from Gemini. Ending interaction.`);
          break;
        }
      }
      if (turns >= this.MAX_TURNS) {
        console.warn("sendMessageStream: Reached maximum tool call turns limit.");
        yield {
          type: GeminiEventType.Content,
          value: "\n\n[System Notice: Maximum interaction turns reached. The conversation may be incomplete.]",
        };
      }
    } catch (error: unknown) {
      if (error instanceof Error && error.name === 'AbortError') {
        console.log("Gemini stream request aborted by user.");
        throw error;
      } else {
        console.error(`Error during Gemini stream or tool interaction:`, error);
        const message = error instanceof Error ? error.message : String(error);
        // Surface the failure to the UI before rethrowing to the caller.
        yield {
          type: GeminiEventType.Content,
          value: `\n\n[Error: An unexpected error occurred during the chat: ${message}]`,
        };
        throw error;
      }
    }
  }

  /**
   * Generates structured JSON content based on conversational history and a schema.
   * @param contents The conversational history (Content array) to provide context.
   * @param schema The SchemaUnion defining the desired JSON structure.
   * @returns A promise that resolves to the parsed JSON object matching the schema.
   * @throws Throws an error if the API call fails or the response is not valid JSON.
   */
  public async generateJson(contents: Content[], schema: SchemaUnion): Promise<any> {
    try {
      const result = await this.ai.models.generateContent({
        model: 'gemini-2.0-flash', // Using flash for potentially faster structured output
        config: {
          ...this.defaultHyperParameters,
          systemInstruction: CoreSystemPrompt,
          responseSchema: schema,
          responseMimeType: 'application/json',
        },
        contents: contents, // Pass the full Content array
      });
      const responseText = result.text;
      if (!responseText) {
        throw new Error("API returned an empty response.");
      }
      try {
        const parsedJson = JSON.parse(responseText);
        // TODO: Add schema validation if needed
        return parsedJson;
      } catch (parseError) {
        console.error("Failed to parse JSON response:", responseText);
        throw new Error(`Failed to parse API response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`);
      }
    } catch (error) {
      console.error("Error generating JSON content:", error);
      const message = error instanceof Error ? error.message : "Unknown API error.";
      throw new Error(`Failed to generate JSON content: ${message}`);
    }
  }
}

View File

@ -0,0 +1,22 @@
import { ToolCallEvent } from "../ui/types.js";
/** Discriminator for events emitted by a GeminiStream. */
export enum GeminiEventType {
  Content,
  ToolCallInfo,
}
/** A chunk of plain response text from the model. */
export interface GeminiContentEvent {
  type: GeminiEventType.Content;
  value: string;
}
/** A lifecycle update for a tool call requested by the model. */
export interface GeminiToolCallInfoEvent {
  type: GeminiEventType.ToolCallInfo;
  value: ToolCallEvent;
}
/** Discriminated union of all event shapes a GeminiStream can yield. */
export type GeminiEvent =
  | GeminiContentEvent
  | GeminiToolCallInfoEvent;
/** Async stream of Gemini events consumed by the stream processor/UI. */
export type GeminiStream = AsyncIterable<GeminiEvent>;

View File

@ -0,0 +1,4 @@
/** Whether a Gemini response stream is currently in flight. */
export enum StreamingState {
  Idle,
  Responding,
}

View File

View File

@ -0,0 +1 @@
// Name of the per-project memory/instructions file referenced by the system prompt.
export const MEMORY_FILE_NAME = 'GEMINI.md';

View File

@ -0,0 +1,142 @@
import { Part } from '@google/genai';
import { HistoryItem } from '../ui/types.js';
import { GeminiEventType, GeminiStream } from './GeminiStream.js';
import { handleToolCallChunk, addErrorMessageToHistory } from './historyUpdater.js';
/** Dependencies and callbacks required to process one Gemini event stream. */
interface StreamProcessorParams {
  stream: GeminiStream;
  signal: AbortSignal;
  // React state setter for the chat history list.
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>;
  // Sends a follow-up part (e.g. a tool's functionResponse) back to Gemini.
  submitQuery: (query: Part) => Promise<void>,
  // Allocates a fresh unique id for a history item.
  getNextMessageId: () => number;
  addHistoryItem: (itemData: Omit<HistoryItem, 'id'>, id: number) => void;
  // Mutable ref tracking the id of the currently-open tool group, if any.
  currentToolGroupIdRef: React.MutableRefObject<number | null>;
}
/**
 * Processes the Gemini stream, managing text buffering, adaptive rendering,
 * and delegating history updates for tool calls and errors.
 */
export const processGeminiStream = async ({ // Renamed function for clarity
  stream,
  signal,
  setHistory,
  submitQuery,
  getNextMessageId,
  addHistoryItem,
  currentToolGroupIdRef,
}: StreamProcessorParams): Promise<void> => {
  // --- State specific to this stream processing invocation ---
  let textBuffer = '';
  let renderTimeoutId: NodeJS.Timeout | null = null;
  let isStreamComplete = false;
  let currentGeminiMessageId: number | null = null;
  // Appends dequeued text to the in-progress gemini history item, if any.
  const render = (content: string) => {
    if (currentGeminiMessageId === null) {
      return;
    }
    setHistory(prev => prev.map(item =>
      item.id === currentGeminiMessageId && item.type === 'gemini'
        ? { ...item, text: (item.text ?? '') + content }
        : item
    ));
  }
  // --- Adaptive Rendering Logic (nested) ---
  // Drains textBuffer in chunks whose size/delay scale with the backlog:
  // large backlogs render fast to catch up, small ones get a slow
  // typewriter effect. Reschedules itself via setTimeout.
  const renderBufferedText = () => {
    if (signal.aborted) {
      if (renderTimeoutId) clearTimeout(renderTimeoutId);
      renderTimeoutId = null;
      return;
    }
    const bufferLength = textBuffer.length;
    let chunkSize = 0;
    let delay = 50;
    if (bufferLength > 150) {
      chunkSize = Math.min(bufferLength, 30); delay = 5;
    } else if (bufferLength > 30) {
      chunkSize = Math.min(bufferLength, 10); delay = 10;
    } else if (bufferLength > 0) {
      chunkSize = 2; delay = 20;
    }
    if (chunkSize > 0) {
      const chunkToRender = textBuffer.substring(0, chunkSize);
      textBuffer = textBuffer.substring(chunkSize);
      render(chunkToRender);
      renderTimeoutId = setTimeout(renderBufferedText, delay);
    } else {
      renderTimeoutId = null; // Clear timeout ID if nothing to render
      if (!isStreamComplete) {
        // Buffer empty, but stream might still send data, check again later
        renderTimeoutId = setTimeout(renderBufferedText, 50);
      }
    }
  };
  // Kicks off the render loop if it is not already scheduled.
  const scheduleRender = () => {
    if (renderTimeoutId === null) {
      renderTimeoutId = setTimeout(renderBufferedText, 0);
    }
  };
  // --- Stream Processing Loop ---
  try {
    for await (const chunk of stream) {
      if (signal.aborted) break;
      if (chunk.type === GeminiEventType.Content) {
        currentToolGroupIdRef.current = null; // Reset tool group on text
        if (currentGeminiMessageId === null) {
          // First text of a new model message: open a fresh history item.
          currentGeminiMessageId = getNextMessageId();
          addHistoryItem({ type: 'gemini', text: '' }, currentGeminiMessageId);
          textBuffer = '';
        }
        textBuffer += chunk.value;
        scheduleRender();
      } else if (chunk.type === GeminiEventType.ToolCallInfo) {
        if (renderTimeoutId) { // Stop rendering loop
          clearTimeout(renderTimeoutId);
          renderTimeoutId = null;
        }
        // Flush any text buffer content.
        render(textBuffer);
        currentGeminiMessageId = null; // End text message context
        textBuffer = ''; // Clear buffer
        // Delegate history update for tool call
        handleToolCallChunk(
          chunk.value,
          setHistory,
          submitQuery,
          getNextMessageId,
          currentToolGroupIdRef
        );
      }
    }
    if (signal.aborted) {
      throw new Error("Request cancelled by user");
    }
  } catch (error: any) {
    if (renderTimeoutId) { // Ensure render loop stops on error
      clearTimeout(renderTimeoutId);
      renderTimeoutId = null;
    }
    // Delegate history update for error message
    addErrorMessageToHistory(error, setHistory, getNextMessageId);
  } finally {
    isStreamComplete = true; // Signal stream end for render loop completion
    if (renderTimeoutId) {
      clearTimeout(renderTimeoutId);
      renderTimeoutId = null;
    }
    renderBufferedText(); // Force final render
  }
};

View File

@ -0,0 +1,173 @@
import { Part } from "@google/genai";
import { toolRegistry } from "../tools/tool-registry.js";
import { HistoryItem, IndividualToolCallDisplay, ToolCallEvent, ToolCallStatus, ToolConfirmationOutcome, ToolEditConfirmationDetails, ToolExecuteConfirmationDetails } from "../ui/types.js";
import { ToolResultDisplay } from "../tools/ToolResult.js";
/**
 * Processes a tool call chunk and updates the history state accordingly.
 * Manages adding new tool groups or updating existing ones. When the chunk
 * carries confirmation details, wraps the confirm callback so the user's
 * decision updates history (via re-entrant calls to this function) and
 * sends the tool outcome back to Gemini through `submitQuery`.
 * Resides here as its primary effect is updating history based on tool events.
 */
export const handleToolCallChunk = (
  chunk: ToolCallEvent,
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
  submitQuery: (query: Part) => Promise<void>,
  getNextMessageId: () => number,
  currentToolGroupIdRef: React.MutableRefObject<number | null>
): void => {
  const toolDefinition = toolRegistry.getTool(chunk.name);
  const description = toolDefinition?.getDescription
    ? toolDefinition.getDescription(chunk.args)
    : '';
  const toolDisplayName = toolDefinition?.displayName ?? chunk.name;
  let confirmationDetails = chunk.confirmationDetails;
  if (confirmationDetails) {
    const originalConfirmationDetails = confirmationDetails;
    // Decorated confirm handler: forwards the outcome, then drives the
    // cancel or execute flow including history updates and model feedback.
    const historyUpdatingConfirm = async (outcome: ToolConfirmationOutcome) => {
      originalConfirmationDetails.onConfirm(outcome);
      if (outcome === ToolConfirmationOutcome.Cancel) {
        // Pick a display for the rejected call: the proposed diff for edits,
        // or the command (struck through) for shell executions.
        let resultDisplay: ToolResultDisplay | undefined;
        if ('fileDiff' in originalConfirmationDetails) {
          resultDisplay = { fileDiff: (originalConfirmationDetails as ToolEditConfirmationDetails).fileDiff };
        } else {
          resultDisplay = `~~${(originalConfirmationDetails as ToolExecuteConfirmationDetails).command}~~`;
        }
        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Canceled, confirmationDetails: undefined, resultDisplay, }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
        const functionResponse: Part = {
          functionResponse: {
            name: chunk.name,
            response: { "error": "User rejected function call." },
          },
        }
        await submitQuery(functionResponse);
      } else {
        const tool = toolRegistry.getTool(chunk.name)
        if (!tool) {
          throw new Error(`Tool "${chunk.name}" not found or is not registered.`);
        }
        // Show progress, execute, then show the real result.
        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: "Executing...", confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
        const result = await tool.execute(chunk.args);
        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: result.returnDisplay, confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
        const functionResponse: Part = {
          functionResponse: {
            name: chunk.name,
            id: chunk.callId,
            response: { "output": result.llmContent },
          },
        }
        await submitQuery(functionResponse);
      }
    }
    confirmationDetails = {
      ...originalConfirmationDetails,
      onConfirm: historyUpdatingConfirm,
    };
  }
  // UI-facing snapshot of this tool call's current state.
  const toolDetail: IndividualToolCallDisplay = {
    callId: chunk.callId,
    name: toolDisplayName,
    description,
    resultDisplay: chunk.resultDisplay,
    status: chunk.status,
    confirmationDetails: confirmationDetails,
  };
  const activeGroupId = currentToolGroupIdRef.current;
  setHistory(prev => {
    if (chunk.status === ToolCallStatus.Pending) {
      if (activeGroupId === null) {
        // Start a new tool group
        const newGroupId = getNextMessageId();
        currentToolGroupIdRef.current = newGroupId;
        return [
          ...prev,
          { id: newGroupId, type: 'tool_group', tools: [toolDetail] } as HistoryItem
        ];
      }
      // Add to existing tool group
      return prev.map(item =>
        item.id === activeGroupId && item.type === 'tool_group'
          ? item.tools.some(t => t.callId === toolDetail.callId)
            ? item // Tool already listed as pending
            : { ...item, tools: [...item.tools, toolDetail] }
          : item
      );
    }
    // Update the status of a pending tool within the active group
    if (activeGroupId === null) {
      // Log if an invoked tool arrives without an active group context
      console.warn("Received invoked tool status without an active tool group ID:", chunk);
      return prev;
    }
    return prev.map(item =>
      item.id === activeGroupId && item.type === 'tool_group'
        ? {
          ...item,
          tools: item.tools.map(t =>
            t.callId === toolDetail.callId
              ? { ...t, ...toolDetail, status: chunk.status } // Update details & status
              : t
          )
        }
        : item
    );
  });
};
/**
 * Appends an error (or, for user aborts, an informational) message to the
 * history. The text is attached to the most recent non-user entry when one
 * exists; otherwise a brand-new history item is created.
 */
export const addErrorMessageToHistory = (
  error: any,
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
  getNextMessageId: () => number
): void => {
  const cancelled = error.name === 'AbortError';
  const messageType = cancelled ? 'info' : 'error';
  const messageText = cancelled
    ? '[Request cancelled by user]'
    : `[Error: ${error.message || 'Unknown error'}]`;
  setHistory(prev => {
    // Locate the most recent non-user entry by scanning backwards.
    let targetIndex = -1;
    for (let i = prev.length - 1; i >= 0; i--) {
      if (prev[i].type !== 'user') {
        targetIndex = i;
        break;
      }
    }
    if (targetIndex === -1) {
      // Nothing suitable to append to — add a standalone item.
      return [
        ...prev,
        { id: getNextMessageId(), type: messageType, text: messageText } as HistoryItem
      ];
    }
    // Merge the message into the located entry, preserving its id.
    return prev.map((item, index) => {
      if (index !== targetIndex) {
        return item;
      }
      let existing = '';
      if (item.type === 'gemini') existing = item.text ?? '';
      else if (item.type === 'tool_group') existing = `Tool execution (${item.tools.length} calls)`;
      else if (item.type === 'error' || item.type === 'info') existing = item.text ?? '';
      const separator = existing && !existing.endsWith('\n') ? '\n' : '';
      const combined = (existing + separator + messageText).trim();
      return { ...item, type: messageType, text: combined };
    });
  });
};

View File

@ -0,0 +1,93 @@
import { ReadFileTool } from "../tools/read-file.tool.js";
import { TerminalTool } from "../tools/terminal.tool.js";
import { MEMORY_FILE_NAME } from "./constants.js";
const contactEmail = 'ntaylormullen@google.com';

/**
 * System prompt injected at the start of every Gemini Code session.
 * Interpolates registered tool names and the memory-file name so the
 * instructions stay consistent with the actual tool registry.
 */
export const CoreSystemPrompt = `
You are an interactive CLI tool assistant specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
# Core Directives & Safety Rules
1. **Explain Critical Commands:** Before executing any command (especially using \`${TerminalTool.Name}\`) that modifies the file system, codebase, or system state, you *must* provide a brief explanation of the command's purpose and potential impact. Prioritize user understanding and safety.
2. **NEVER Commit Changes:** Unless explicitly instructed by the user to do so, you MUST NOT commit changes to version control (e.g., git commit). This is critical for user control over their repository.
3. **Security First:** Always apply security best practices. Never introduce code that exposes, logs, or commits secrets, API keys, or other sensitive information.
# Primary Workflow: Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this sequence:
1. **Understand:** Analyze the user's request and the relevant codebase context. Check for project-specific information in \`${MEMORY_FILE_NAME}\` if it exists. Use search tools extensively (in parallel if independent) to understand file structures, existing code patterns, and conventions.
2. **Implement:** Use the available tools (e.g., file editing, \`${TerminalTool.Name}\`) to construct the solution, strictly adhering to the project's established conventions (see 'Following Conventions' below).
- If creating a new project, rely on scaffolding commands to lay out the initial project structure (e.g., npm init ...)
3. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining \`README\` files, \`${MEMORY_FILE_NAME}\`, build/package configuration (e.g., \`package.json\`), or existing test execution patterns. NEVER assume standard test commands.
4. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific linting and type-checking commands (e.g., \`npm run lint\`, \`ruff check .\`, \`tsc\`) that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, ask the user and propose adding them to \`${MEMORY_FILE_NAME}\` for future reference.
# Key Operating Principles
## Following Conventions
Rigorously adhere to existing project conventions when reading or modifying code. Analyze surrounding code and configuration first.
- **Libraries/Frameworks:** NEVER assume a library/framework is available or appropriate. Verify its established usage within the project (check imports, configuration files like \`package.json\`, \`Cargo.toml\`, \`requirements.txt\`, \`build.gradle\`, etc., or observe neighboring files) before employing it.
- **Style & Structure:** Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project.
- **Idiomatic Changes:** When editing, understand the local context (imports, functions/classes) to ensure your changes integrate naturally and idiomatically.
- **Comments:** Add code comments sparingly. Focus on *why* something is done, especially for complex logic, rather than *what* is done. Only add comments if necessary for clarity or if requested by the user.
## Memory (${MEMORY_FILE_NAME})
Utilize the \`${MEMORY_FILE_NAME}\` file in the current working directory for project-specific context:
- Reference stored commands, style preferences, and codebase notes when performing tasks.
- When you discover frequently used commands (build, test, lint, typecheck) or learn about specific project conventions or style preferences, proactively propose adding them to \`${MEMORY_FILE_NAME}\` for future sessions.
## Tone and Style (CLI Interaction)
- **Concise & Direct:** Adopt a professional, direct, and concise tone suitable for a CLI environment.
- **Minimal Output:** Aim for fewer than 4 lines of text output (excluding tool use/code generation) per response whenever practical. Focus strictly on the user's query.
- **Clarity over Brevity (When Needed):** While conciseness is key, prioritize clarity for essential explanations (like pre-command warnings) or when seeking necessary clarification if a request is ambiguous.
- **No Chitchat:** Avoid conversational filler, preambles ("Okay, I will now..."), or postambles ("I have finished the changes..."). Get straight to the action or answer.
- **Formatting:** Use GitHub-flavored Markdown. Responses will be rendered in monospace.
- **Tools vs. Text:** Use tools for actions, text output *only* for communication. Do not add explanatory comments within tool calls or code blocks unless specifically part of the required code/command itself.
- **Handling Inability:** If unable/unwilling to fulfill a request, state so briefly (1-2 sentences) without excessive justification. Offer alternatives if appropriate.
## Proactiveness
- **Act within Scope:** Fulfill the user's request thoroughly, including reasonable, directly implied follow-up actions.
- **Confirm Ambiguity/Expansion:** Do not take significant actions beyond the clear scope of the request without confirming with the user. If asked *how* to do something, explain first, don't just do it.
- **Stop After Action:** After completing a code modification or file operation, simply stop. Do not provide summaries unless asked.
# Tool Usage
- **Search:** Prefer the Agent tool for file searching to optimize context usage.
- **Parallelism:** Execute multiple independent tool calls in parallel when feasible.
- **Command Execution:** Use the \`${TerminalTool.Name}\` tool for running shell commands, remembering the safety rule to explain modifying commands first.
# Interaction Details
- **Help Command:** Use \`/help\` to display Gemini Code help. To get specific command/flag info, execute \`gemini -h\` via \`${TerminalTool.Name}\` and show the output.
- **Synthetic Messages:** Ignore system messages like \`++Request Cancelled++\`. Do not generate them.
- **Feedback:** Direct feedback to ${contactEmail}.
# Examples (Illustrating Tone and Workflow)
<example>
user: 1 + 2
assistant: 3
</example>
<example>
user: is 13 a prime number?
assistant: true
</example>
<example>
user: List files here.
assistant: [tool_call: execute_bash_command for 'ls -la']
</example>
<example>
user: Refactor the auth logic in src/auth.py to use the 'requests' library.
assistant: Okay, I see src/auth.py currently uses 'urllib'. Before changing it, I need to check if 'requests' is already a project dependency. [tool_call: ${TerminalTool.Name} for grep 'requests', 'requirements.txt']
(After confirming dependency or asking user to add it)
Okay, 'requests' is available. I will now refactor src/auth.py.
[tool_call: Uses read, edit tools following conventions]
(After editing)
[tool_call: Runs project-specific lint/typecheck commands found previously, e.g., ${TerminalTool.Name} for 'ruff', 'check', 'src/auth.py']
</example>
<example>
user: Delete the temp directory.
assistant: I can run \`rm -rf ./temp\`. This will permanently delete the directory and all its contents. Is it okay to proceed?
</example>
# Final Reminder
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions on the contents of files; instead use the ${ReadFileTool.Name} to ensure you aren't making too broad of assumptions.
`;

View File

@ -0,0 +1,57 @@
import React from 'react';
import { render } from 'ink';
import App from './ui/App.js';
import { parseArguments } from './config/args.js';
import { loadEnvironment } from './config/env.js';
import { getTargetDirectory } from './utils/paths.js';
import { toolRegistry } from './tools/tool-registry.js';
import { LSTool } from './tools/ls.tool.js';
import { ReadFileTool } from './tools/read-file.tool.js';
import { GrepTool } from './tools/grep.tool.js';
import { GlobTool } from './tools/glob.tool.js';
import { EditTool } from './tools/edit.tool.js';
import { TerminalTool } from './tools/terminal.tool.js';
import { WriteFileTool } from './tools/write-file.tool.js';
async function main() {
  // Bootstrap configuration: environment variables first, then CLI arguments.
  loadEnvironment();
  const cliArgs = await parseArguments();
  // Resolve the directory every tool will be grounded in.
  const workingDir = getTargetDirectory(cliArgs.target_dir);
  registerTools(workingDir);
  // Hand control over to the Ink-rendered terminal UI.
  render(React.createElement(App, { directory: workingDir }));
}
// --- Global Entry Point ---
// Top-level failure handler: report the error and exit non-zero.
main().catch((error) => {
  const detail = error instanceof Error ? error.message : String(error);
  console.error('An unexpected critical error occurred:');
  console.error(detail);
  process.exit(1);
});
function registerTools(targetDir: string) {
const lsTool = new LSTool(targetDir);
const readFileTool = new ReadFileTool(targetDir);
const grepTool = new GrepTool(targetDir);
const globTool = new GlobTool(targetDir);
const editTool = new EditTool(targetDir);
const terminalTool = new TerminalTool(targetDir);
const writeFileTool = new WriteFileTool(targetDir);
toolRegistry.registerTool(lsTool);
toolRegistry.registerTool(readFileTool);
toolRegistry.registerTool(grepTool);
toolRegistry.registerTool(globTool);
toolRegistry.registerTool(editTool);
toolRegistry.registerTool(terminalTool);
toolRegistry.registerTool(writeFileTool);
}

View File

@ -0,0 +1,73 @@
import type { FunctionDeclaration, Schema } from '@google/genai';
import { ToolResult } from './ToolResult.js';
import { Tool } from './Tool.js';
import { ToolCallConfirmationDetails } from '../ui/types.js';
/**
 * Base implementation for tools, providing the shared plumbing: schema
 * construction plus default validation, description, and confirmation
 * behavior that concrete tools can override.
 */
export abstract class BaseTool<TParams = unknown, TResult extends ToolResult = ToolResult> implements Tool<TParams, TResult> {
  /**
   * Creates a new instance of BaseTool.
   * @param name Internal name of the tool (used for API calls)
   * @param displayName User-friendly display name of the tool
   * @param description Description of what the tool does
   * @param parameterSchema JSON Schema defining the parameters
   */
  constructor(
    public readonly name: string,
    public readonly displayName: string,
    public readonly description: string,
    public readonly parameterSchema: Record<string, unknown>
  ) {}

  /**
   * Function declaration advertised to the model, assembled from the
   * name, description, and parameter schema given at construction.
   */
  get schema(): FunctionDeclaration {
    const declaration: FunctionDeclaration = {
      name: this.name,
      description: this.description,
      parameters: this.parameterSchema as Schema
    };
    return declaration;
  }

  /**
   * Validates the parameters for the tool.
   * Placeholder: always accepts; derived classes should override this with
   * real schema validation.
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  invalidParams(params: TParams): string | null {
    return null;
  }

  /**
   * Gets a pre-execution description of the tool operation.
   * Default: the raw parameters serialized as JSON; override for friendlier text.
   * @param params Parameters for the tool execution
   * @returns A markdown string describing what the tool will do
   */
  getDescription(params: TParams): string {
    return JSON.stringify(params);
  }

  /**
   * Determines if the tool should prompt for confirmation before execution.
   * Default: never asks for confirmation.
   * @param params Parameters for the tool execution
   * @returns Whether or not execute should be confirmed by the user.
   */
  shouldConfirmExecute(params: TParams): Promise<ToolCallConfirmationDetails | false> {
    return Promise.resolve(false);
  }

  /**
   * Abstract method to execute the tool with the given parameters.
   * Must be implemented by derived classes.
   * @param params Parameters for the tool execution
   * @returns Result of the tool execution
   */
  abstract execute(params: TParams): Promise<TResult>;
}

View File

@ -0,0 +1,57 @@
import { FunctionDeclaration } from "@google/genai";
import { ToolResult } from "./ToolResult.js";
import { ToolCallConfirmationDetails } from "../ui/types.js";
/**
 * Interface representing the base Tool functionality.
 *
 * A tool pairs a function declaration (exposed to the model for function
 * calling) with the local logic needed to validate, describe, confirm,
 * and execute an invocation.
 */
export interface Tool<TParams = unknown, TResult extends ToolResult = ToolResult> {
  /**
   * The internal name of the tool (used for API calls).
   */
  name: string;

  /**
   * The user-friendly display name of the tool.
   */
  displayName: string;

  /**
   * Description of what the tool does.
   */
  description: string;

  /**
   * Function declaration schema from @google/genai.
   */
  schema: FunctionDeclaration;

  /**
   * Validates the parameters for the tool.
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  invalidParams(params: TParams): string | null;

  /**
   * Gets a pre-execution description of the tool operation,
   * shown to the user before the tool runs.
   * @param params Parameters for the tool execution
   * @returns A markdown string describing what the tool will do
   */
  getDescription(params: TParams): string;

  /**
   * Determines if the tool should prompt for confirmation before execution.
   * @param params Parameters for the tool execution
   * @returns Confirmation details when the user must approve, false when
   *   execution may proceed without confirmation.
   */
  shouldConfirmExecute(params: TParams): Promise<ToolCallConfirmationDetails | false>;

  /**
   * Executes the tool with the given parameters.
   * @param params Parameters for the tool execution
   * @returns Result of the tool execution
   */
  execute(params: TParams): Promise<TResult>;
}

View File

@ -0,0 +1,22 @@
/**
 * Standard tool result interface that all tools should implement.
 */
export interface ToolResult {
  /**
   * Content meant to be included in LLM history.
   * This should represent the factual outcome of the tool execution.
   */
  llmContent: string;

  /**
   * Markdown string for user display.
   * This provides a user-friendly summary or visualization of the result.
   */
  returnDisplay: ToolResultDisplay;
}

/** Display payload: either plain markdown text or a structured file diff. */
export type ToolResultDisplay = string | FileDiff;

/** Structured display result carrying a diff string for rendering. */
export interface FileDiff {
  // Unified-diff text describing the file change shown to the user.
  fileDiff: string
}

View File

@ -0,0 +1,369 @@
import fs from 'fs';
import path from 'path';
import * as Diff from 'diff';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { ToolResult } from './ToolResult.js';
import { BaseTool } from './BaseTool.js';
import { ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolEditConfirmationDetails } from '../ui/types.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import { ReadFileTool } from './read-file.tool.js';
import { WriteFileTool } from './write-file.tool.js';
/**
 * Parameters for the Edit tool
 */
export interface EditToolParams {
  /**
   * The absolute path to the file to modify
   */
  file_path: string;

  /**
   * The text to replace
   */
  old_string: string;

  /**
   * The text to replace it with
   */
  new_string: string;

  /**
   * The expected number of replacements to perform (optional, defaults to 1)
   */
  expected_replacements?: number;
}

/**
 * Result from the Edit tool
 */
export interface EditToolResult extends ToolResult {
}

/** Internal summary of what an edit would do, computed before anything is written. */
interface CalculatedEdit {
  // Existing file content; null when the target file does not exist.
  currentContent: string | null;
  // Content the file would hold after the edit.
  newContent: string;
  // Number of old_string matches found in the current content.
  occurrences: number;
  // Present when the edit cannot proceed: `display` is user-facing, `raw` goes to the LLM.
  error?: { display: string, raw: string };
  // True when an empty old_string targets a non-existent file (file creation).
  isNewFile: boolean;
}
/**
 * Implementation of the Edit tool that modifies files.
 * This tool maintains state for the "Always Edit" confirmation preference.
 */
export class EditTool extends BaseTool<EditToolParams, EditToolResult> {
  // Session-scoped flag set when the user chooses "Proceed Always" at a confirmation prompt.
  private shouldAlwaysEdit = false;

  // All edited paths must resolve inside this directory.
  private readonly rootDirectory: string;

  /**
   * Creates a new instance of the EditTool
   * @param rootDirectory Root directory to ground this tool in.
   */
  constructor(rootDirectory: string) {
    super(
      'replace',
      'Edit',
      `Replaces a SINGLE, UNIQUE occurrence of text within a file. Requires providing significant context around the change to ensure uniqueness. For moving/renaming files, use the Bash tool with \`mv\`. For replacing entire file contents or creating new files use the ${WriteFileTool.Name} tool. Always use the ${ReadFileTool.Name} tool to examine the file before using this tool.`,
      {
        properties: {
          file_path: {
            description: 'The absolute path to the file to modify. Must start with /. When creating a new file, ensure the parent directory exists (use the `LS` tool to verify).',
            type: 'string'
          },
          old_string: {
            description: 'The exact text to replace. CRITICAL: Must uniquely identify the single instance to change. Include at least 3-5 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations or does not match exactly, the tool will fail. Use an empty string ("") when creating a new file.',
            type: 'string'
          },
          new_string: {
            description: 'The text to replace the `old_string` with. When creating a new file (using an empty `old_string`), this should contain the full desired content of the new file. Ensure the resulting code is correct and idiomatic.',
            type: 'string'
          },
          // Previously declared in EditToolParams and consumed by calculateEdit,
          // but missing from the schema, so callers could never supply it.
          expected_replacements: {
            description: 'Optional: The expected number of occurrences to replace. Defaults to 1. The edit fails if the actual number of matches differs.',
            type: 'number'
          }
        },
        required: ['file_path', 'old_string', 'new_string'],
        type: 'object'
      }
    );
    this.rootDirectory = path.resolve(rootDirectory);
  }

  /**
   * Checks if a path is within the root directory.
   * @param pathToCheck The absolute path to check.
   * @returns True if the path is within the root directory, false otherwise.
   */
  private isWithinRoot(pathToCheck: string): boolean {
    const normalizedPath = path.normalize(pathToCheck);
    const normalizedRoot = this.rootDirectory;
    // Append a trailing separator so "/root-other" does not pass a prefix test for "/root".
    const rootWithSep = normalizedRoot.endsWith(path.sep)
      ? normalizedRoot
      : normalizedRoot + path.sep;
    return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
  }

  /**
   * Validates the parameters for the Edit tool
   * @param params Parameters to validate
   * @returns True if parameters are valid, false otherwise
   */
  validateParams(params: EditToolParams): boolean {
    if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
      return false;
    }
    // Ensure path is absolute
    if (!path.isAbsolute(params.file_path)) {
      console.error(`File path must be absolute: ${params.file_path}`);
      return false;
    }
    // Ensure path is within the root directory
    if (!this.isWithinRoot(params.file_path)) {
      console.error(`File path must be within the root directory (${this.rootDirectory}): ${params.file_path}`);
      return false;
    }
    // Validate expected_replacements if provided
    if (params.expected_replacements !== undefined && params.expected_replacements < 0) {
      console.error('Expected replacements must be a non-negative number');
      return false;
    }
    return true;
  }

  /**
   * Calculates the potential outcome of an edit operation.
   * @param params Parameters for the edit operation
   * @returns An object describing the potential edit outcome
   * @throws File system errors if reading the file fails unexpectedly (e.g., permissions)
   */
  private calculateEdit(params: EditToolParams): CalculatedEdit {
    const expectedReplacements = params.expected_replacements === undefined ? 1 : params.expected_replacements;
    let currentContent: string | null = null;
    let fileExists = false;
    let isNewFile = false;
    let newContent = '';
    let occurrences = 0;
    let error: { display: string, raw: string } | undefined = undefined;
    try {
      currentContent = fs.readFileSync(params.file_path, 'utf8');
      fileExists = true;
    } catch (err: any) {
      // Only a missing file is expected; rethrow anything else (e.g. EACCES).
      if (err.code !== 'ENOENT') {
        throw err;
      }
      fileExists = false;
    }
    if (params.old_string === '' && !fileExists) {
      // Empty old_string on a missing file means "create a new file".
      isNewFile = true;
      newContent = params.new_string;
      occurrences = 0;
    } else if (!fileExists) {
      error = {
        display: `File not found.`,
        raw: `File not found: ${params.file_path}`
      };
    } else if (currentContent !== null) {
      occurrences = this.countOccurrences(currentContent, params.old_string);
      if (occurrences === 0) {
        error = {
          display: `No edits made`,
          raw: `Failed to edit, 0 occurrences found`
        };
      } else if (occurrences !== expectedReplacements) {
        error = {
          display: `Failed to edit, expected ${expectedReplacements} occurrences but found ${occurrences}`,
          raw: `Failed to edit, Expected ${expectedReplacements} occurrences but found ${occurrences} in file: ${params.file_path}`
        };
      } else {
        newContent = this.replaceAll(currentContent, params.old_string, params.new_string);
      }
    } else {
      error = {
        display: `Failed to read content`,
        raw: `Failed to read content of existing file: ${params.file_path}`
      };
    }
    return {
      currentContent,
      newContent,
      occurrences,
      error,
      isNewFile
    };
  }

  /**
   * Determines if confirmation is needed and prepares the confirmation details.
   * This method performs the calculation needed to generate the diff and respects the `shouldAlwaysEdit` state.
   * @param params Parameters for the potential edit operation
   * @returns Confirmation details object or false if no confirmation is needed/possible.
   */
  async shouldConfirmExecute(params: EditToolParams): Promise<ToolCallConfirmationDetails | false> {
    if (this.shouldAlwaysEdit) {
      return false;
    }
    if (!this.validateParams(params)) {
      console.error("[EditTool] Attempted confirmation with invalid parameters.");
      return false;
    }
    let calculatedEdit: CalculatedEdit;
    try {
      calculatedEdit = this.calculateEdit(params);
    } catch (error) {
      console.error(`Error calculating edit for confirmation: ${error instanceof Error ? error.message : String(error)}`);
      return false;
    }
    if (calculatedEdit.error) {
      // The edit will fail anyway; skip confirmation and let execute report it.
      return false;
    }
    const fileName = path.basename(params.file_path);
    const fileDiff = Diff.createPatch(
      fileName,
      calculatedEdit.currentContent ?? '',
      calculatedEdit.newContent,
      'Current',
      'Proposed',
      { context: 3, ignoreWhitespace: true, }
    );
    const confirmationDetails: ToolEditConfirmationDetails = {
      title: `Confirm Edit: ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
      fileName,
      fileDiff,
      onConfirm: async (outcome: ToolConfirmationOutcome) => {
        // Remember a "Proceed Always" so subsequent edits skip confirmation.
        if (outcome === ToolConfirmationOutcome.ProceedAlways) {
          this.shouldAlwaysEdit = true;
        }
      },
    };
    return confirmationDetails;
  }

  /**
   * Gets a short one-line description of the edit: path plus a snippet of
   * the old and new text.
   */
  getDescription(params: EditToolParams): string {
    const relativePath = makeRelative(params.file_path, this.rootDirectory);
    const oldStringSnippet = params.old_string.split('\n')[0].substring(0, 30) + (params.old_string.length > 30 ? '...' : '');
    const newStringSnippet = params.new_string.split('\n')[0].substring(0, 30) + (params.new_string.length > 30 ? '...' : '');
    return `${shortenPath(relativePath)}: ${oldStringSnippet} => ${newStringSnippet}`;
  }

  /**
   * Executes the edit operation with the given parameters.
   * This method recalculates the edit operation before execution.
   * @param params Parameters for the edit operation
   * @returns Result of the edit operation
   */
  async execute(params: EditToolParams): Promise<EditToolResult> {
    if (!this.validateParams(params)) {
      return {
        llmContent: 'Invalid parameters for file edit operation',
        returnDisplay: '**Error:** Invalid parameters for file edit operation'
      };
    }
    let editData: CalculatedEdit;
    try {
      editData = this.calculateEdit(params);
    } catch (error) {
      return {
        llmContent: `Error preparing edit: ${error instanceof Error ? error.message : String(error)}`,
        returnDisplay: 'Failed to prepare edit'
      };
    }
    if (editData.error) {
      return {
        llmContent: editData.error.raw,
        returnDisplay: editData.error.display
      };
    }
    try {
      this.ensureParentDirectoriesExist(params.file_path);
      fs.writeFileSync(params.file_path, editData.newContent, 'utf8');
      if (editData.isNewFile) {
        return {
          llmContent: `Created new file: ${params.file_path} with provided content.`,
          returnDisplay: `Created ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`
        };
      } else {
        const fileName = path.basename(params.file_path);
        const fileDiff = Diff.createPatch(
          fileName,
          editData.currentContent ?? '',
          editData.newContent,
          'Current',
          'Proposed',
          { context: 3, ignoreWhitespace: true }
        );
        return {
          llmContent: `Successfully modified file: ${params.file_path} (${editData.occurrences} replacements).`,
          returnDisplay: { fileDiff }
        };
      }
    } catch (error) {
      return {
        llmContent: `Error executing edit: ${error instanceof Error ? error.message : String(error)}`,
        returnDisplay: `Failed to edit file`
      };
    }
  }

  /**
   * Counts occurrences of a substring in a string
   * @param str String to search in
   * @param substr Substring to count
   * @returns Number of occurrences
   */
  private countOccurrences(str: string, substr: string): number {
    if (substr === '') {
      return 0;
    }
    let count = 0;
    let pos = str.indexOf(substr);
    while (pos !== -1) {
      count++;
      pos = str.indexOf(substr, pos + substr.length);
    }
    return count;
  }

  /**
   * Replaces all occurrences of a substring in a string
   * @param str String to modify
   * @param find Substring to find
   * @param replace Replacement string
   * @returns Modified string
   */
  private replaceAll(str: string, find: string, replace: string): string {
    if (find === '') {
      return str;
    }
    return str.split(find).join(replace);
  }

  /**
   * Creates parent directories if they don't exist
   * @param filePath Path to ensure parent directories exist
   */
  private ensureParentDirectoriesExist(filePath: string): void {
    const dirName = path.dirname(filePath);
    if (!fs.existsSync(dirName)) {
      fs.mkdirSync(dirName, { recursive: true });
    }
  }
}

View File

@ -0,0 +1,227 @@
import fs from 'fs';
import path from 'path';
import fg from 'fast-glob';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { BaseTool } from './BaseTool.js';
import { ToolResult } from './ToolResult.js';
import { shortenPath, makeRelative } from '../utils/paths.js';
/**
 * Parameters for the GlobTool
 */
export interface GlobToolParams {
  /**
   * The glob pattern to match files against
   */
  pattern: string;

  /**
   * The directory to search in (optional, defaults to the tool's root directory).
   * Expected to be an absolute path; validated against the root directory.
   */
  path?: string;
}

/**
 * Result from the GlobTool
 */
export interface GlobToolResult extends ToolResult {
}
/**
 * Implementation of the GlobTool that finds files matching patterns,
 * sorted by modification time (newest first).
 */
export class GlobTool extends BaseTool<GlobToolParams, GlobToolResult> {
  /**
   * The root directory that this tool is grounded in.
   * All file operations will be restricted to this directory.
   */
  private readonly rootDirectory: string;

  /**
   * Creates a new instance of the GlobTool
   * @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
   */
  constructor(rootDirectory: string) {
    super(
      'glob',
      'FindFiles',
      'Efficiently finds files matching specific glob patterns (e.g., `src/**/*.ts`, `**/*.md`), returning absolute paths sorted by modification time (newest first). Ideal for quickly locating files based on their name or path structure, especially in large codebases.',
      {
        properties: {
          pattern: {
            description: 'The glob pattern to match against (e.g., \'*.py\', \'src/**/*.js\', \'docs/*.md\').',
            type: 'string'
          },
          path: {
            description: 'Optional: The absolute path to the directory to search within. If omitted, searches the root directory.',
            type: 'string'
          }
        },
        required: ['pattern'],
        type: 'object'
      }
    );
    // Set the root directory
    this.rootDirectory = path.resolve(rootDirectory);
  }

  /**
   * Checks if a path is within the root directory.
   * This is a security measure to prevent the tool from accessing files outside of its designated root.
   * @param pathToCheck The path to check (expects an absolute path)
   * @returns True if the path is within the root directory, false otherwise
   */
  private isWithinRoot(pathToCheck: string): boolean {
    const absolutePathToCheck = path.resolve(pathToCheck);
    const normalizedPath = path.normalize(absolutePathToCheck);
    const normalizedRoot = path.normalize(this.rootDirectory);
    // Ensure the normalizedRoot ends with a path separator for proper prefix comparison
    const rootWithSep = normalizedRoot.endsWith(path.sep)
      ? normalizedRoot
      : normalizedRoot + path.sep;
    // Check if it's the root itself or starts with the root path followed by a separator.
    // This ensures that we don't accidentally allow access to parent directories.
    return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
  }

  /**
   * Validates the parameters for the tool.
   * Ensures that the provided parameters adhere to the expected schema and that the search path is valid and within the tool's root directory.
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  invalidParams(params: GlobToolParams): string | null {
    if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
      return "Parameters failed schema validation. Ensure 'pattern' is a string and 'path' (if provided) is a string.";
    }
    // Determine the absolute path to check
    const searchDirAbsolute = params.path ?? this.rootDirectory;
    // Validate path is within root directory
    if (!this.isWithinRoot(searchDirAbsolute)) {
      return `Search path ("${searchDirAbsolute}") resolves outside the tool's root directory ("${this.rootDirectory}").`;
    }
    // Validate path exists and is a directory using the absolute path.
    // These checks prevent the tool from attempting to search in non-existent or non-directory paths, which would lead to errors.
    try {
      if (!fs.existsSync(searchDirAbsolute)) {
        return `Search path does not exist: ${shortenPath(makeRelative(searchDirAbsolute, this.rootDirectory))} (absolute: ${searchDirAbsolute})`;
      }
      if (!fs.statSync(searchDirAbsolute).isDirectory()) {
        return `Search path is not a directory: ${shortenPath(makeRelative(searchDirAbsolute, this.rootDirectory))} (absolute: ${searchDirAbsolute})`;
      }
    } catch (e: any) {
      // Catch potential permission errors during sync checks
      return `Error accessing search path: ${e.message}`;
    }
    // Validate glob pattern (basic non-empty check)
    if (!params.pattern || typeof params.pattern !== 'string' || params.pattern.trim() === '') {
      return "The 'pattern' parameter cannot be empty.";
    }
    // Could add more sophisticated glob pattern validation if needed
    return null; // Parameters are valid
  }

  /**
   * Gets a description of the glob operation.
   * @param params Parameters for the glob operation.
   * @returns A string describing the glob operation.
   */
  getDescription(params: GlobToolParams): string {
    let description = `'${params.pattern}'`;
    if (params.path) {
      // params.path is known truthy here; the previous `|| this.rootDirectory`
      // fallback was dead code.
      const relativePath = makeRelative(params.path, this.rootDirectory);
      description += ` within ${shortenPath(relativePath)}`;
    }
    return description;
  }

  /**
   * Executes the glob search with the given parameters
   * @param params Parameters for the glob search
   * @returns Result of the glob search
   */
  async execute(params: GlobToolParams): Promise<GlobToolResult> {
    const validationError = this.invalidParams(params);
    if (validationError) {
      return {
        llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
        returnDisplay: `**Error:** Failed to execute tool.`
      };
    }
    try {
      // 1. Resolve the absolute search directory. Validation ensures it exists and is a directory.
      const searchDirAbsolute = params.path ?? this.rootDirectory;
      // 2. Perform Glob Search using fast-glob
      // We use fast-glob because it's performant and supports glob patterns.
      const entries = await fg(params.pattern, {
        cwd: searchDirAbsolute, // Search within this absolute directory
        absolute: true, // Return absolute paths
        onlyFiles: true, // Match only files
        stats: true, // Include file stats object for sorting
        dot: true, // Include files starting with a dot
        ignore: ['**/node_modules/**', '**/.git/**'], // Common sensible default, adjust as needed
        followSymbolicLinks: false, // Avoid potential issues with symlinks unless specifically needed
        suppressErrors: true, // Suppress EACCES errors for individual files (we handle dir access in validation)
      });
      // 3. Handle No Results
      if (!entries || entries.length === 0) {
        return {
          llmContent: `No files found matching pattern "${params.pattern}" within ${searchDirAbsolute}.`,
          returnDisplay: `No files found`
        };
      }
      // 4. Sort Results by Modification Time (Newest First)
      // The stats object is guaranteed by the `stats: true` option in the fast-glob configuration.
      entries.sort((a, b) => {
        // Ensure stats exist before accessing mtime (though fg should provide them)
        const mtimeA = a.stats?.mtime?.getTime() ?? 0;
        const mtimeB = b.stats?.mtime?.getTime() ?? 0;
        return mtimeB - mtimeA; // Descending order
      });
      // 5. Format Output
      const sortedAbsolutePaths = entries.map(entry => entry.path);
      // Convert absolute paths to relative paths (to rootDir) for clearer display
      const sortedRelativePaths = sortedAbsolutePaths.map(absPath => makeRelative(absPath, this.rootDirectory));
      // Construct the result message
      const fileListDescription = sortedRelativePaths.map(p => `  - ${shortenPath(p)}`).join('\n');
      const fileCount = sortedRelativePaths.length;
      const relativeSearchDir = makeRelative(searchDirAbsolute, this.rootDirectory);
      const displayPath = shortenPath(relativeSearchDir === '.' ? 'root directory' : relativeSearchDir);
      return {
        llmContent: `Found ${fileCount} file(s) matching "${params.pattern}" within ${displayPath}, sorted by modification time (newest first):\n${fileListDescription}`,
        returnDisplay: `Found ${fileCount} matching file(s)`
      };
    } catch (error) {
      // Catch unexpected errors during glob execution (less likely with suppressErrors=true, but possible)
      const errorMessage = error instanceof Error ? error.message : String(error);
      console.error(`GlobTool execute Error: ${errorMessage}`, error);
      return {
        llmContent: `Error during glob search operation: ${errorMessage}`,
        returnDisplay: `**Error:** An unexpected error occurred.`
      };
    }
  }
}

View File

@ -0,0 +1,493 @@
import fs from 'fs'; // Used for sync checks in validation
import fsPromises from 'fs/promises'; // Used for async operations in fallback
import path from 'path';
import { EOL } from 'os'; // Used for parsing grep output lines
import { spawn } from 'child_process'; // Used for git grep and system grep
import fastGlob from 'fast-glob'; // Used for JS fallback file searching
import { ToolResult } from './ToolResult.js';
import { BaseTool } from './BaseTool.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
// --- Interfaces (kept separate for clarity) ---
/**
 * Parameters for the GrepTool
 */
export interface GrepToolParams {
  /**
   * The regular expression pattern to search for in file contents
   */
  pattern: string;
  /**
   * The directory to search in. May be absolute, or relative to the tool's
   * root directory; it is resolved against the root and must stay inside it.
   * Defaults to the root directory when omitted.
   */
  path?: string;
  /**
   * File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")
   */
  include?: string;
}
/**
 * Result object for a single grep match
 */
interface GrepMatch {
  /** Path of the matched file, relative to the directory the search ran in. */
  filePath: string;
  /** 1-based line number of the matching line. */
  lineNumber: number;
  /** Text of the matching line (without its trailing newline). */
  line: string;
}
/**
 * Result from the GrepTool
 */
export interface GrepToolResult extends ToolResult {
  // NOTE(review): intentionally adds nothing beyond ToolResult today;
  // presumably a placeholder for grep-specific result fields later.
}
// --- GrepTool Class ---
/**
 * Implementation of the GrepTool that searches file contents using git grep,
 * system grep, or a pure-JavaScript fallback (in that order of preference).
 */
export class GrepTool extends BaseTool<GrepToolParams, GrepToolResult> {
  /** Absolute, normalized root directory; all searches are confined to it. */
  private rootDirectory: string;

  /**
   * Creates a new instance of the GrepTool
   * @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
   */
  constructor(rootDirectory: string) {
    super(
      'search_file_content',
      'SearchText',
      'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers.',
      {
        properties: {
          pattern: {
            description: 'The regular expression (regex) pattern to search for within file contents (e.g., \'function\\s+myFunction\', \'import\\s+\\{.*\\}\\s+from\\s+.*\').',
            type: 'string'
          },
          path: {
            description: 'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.',
            type: 'string'
          },
          include: {
            description: 'Optional: A glob pattern to filter which files are searched (e.g., \'*.js\', \'*.{ts,tsx}\', \'src/**\'). If omitted, searches all files (respecting potential global ignores).',
            type: 'string'
          }
        },
        required: ['pattern'],
        type: 'object'
      }
    );
    // Ensure rootDirectory is absolute and normalized
    this.rootDirectory = path.resolve(rootDirectory);
  }

  // --- Validation Methods ---

  /**
   * Checks if a path is within the root directory and resolves it.
   * @param relativePath Path relative to the root directory (or undefined for root).
   * @returns The absolute path if valid and exists.
   * @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
   */
  private resolveAndValidatePath(relativePath?: string): string {
    const targetPath = path.resolve(this.rootDirectory, relativePath || '.');
    // Security check: compare against the root plus a trailing separator.
    // A bare startsWith(root) would wrongly accept sibling directories that
    // share the root as a string prefix (e.g. "/root-evil" vs root "/root").
    const rootWithSep = this.rootDirectory.endsWith(path.sep)
      ? this.rootDirectory
      : this.rootDirectory + path.sep;
    if (targetPath !== this.rootDirectory && !targetPath.startsWith(rootWithSep)) {
      throw new Error(`Path validation failed: Attempted path "${relativePath || '.'}" resolves outside the allowed root directory "${this.rootDirectory}".`);
    }
    // Check existence and type after resolving. The "not a directory" throw
    // lives OUTSIDE the try so it isn't swallowed and re-wrapped by the
    // catch below (which the original version did).
    let stats: fs.Stats;
    try {
      stats = fs.statSync(targetPath);
    } catch (err: any) {
      if (err.code === 'ENOENT') {
        throw new Error(`Path does not exist: ${targetPath}`);
      }
      throw new Error(`Failed to access path stats for ${targetPath}: ${err.message}`);
    }
    if (!stats.isDirectory()) {
      throw new Error(`Path is not a directory: ${targetPath}`);
    }
    return targetPath;
  }

  /**
   * Validates the parameters for the tool
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  invalidParams(params: GrepToolParams): string | null {
    if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
      return "Parameters failed schema validation.";
    }
    // The pattern must at least compile as a JS regex; the external grep
    // strategies use ERE, which this check approximates.
    try {
      new RegExp(params.pattern);
    } catch (error) {
      return `Invalid regular expression pattern provided: ${params.pattern}. Error: ${error instanceof Error ? error.message : String(error)}`;
    }
    try {
      this.resolveAndValidatePath(params.path);
    } catch (error) {
      return error instanceof Error ? error.message : String(error);
    }
    return null; // Parameters are valid
  }

  // --- Core Execution ---

  /**
   * Executes the grep search with the given parameters
   * @param params Parameters for the grep search
   * @returns Result of the grep search
   */
  async execute(params: GrepToolParams): Promise<GrepToolResult> {
    const validationError = this.invalidParams(params);
    if (validationError) {
      console.error(`GrepTool Parameter Validation Failed: ${validationError}`);
      return {
        llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
        returnDisplay: `**Error:** Failed to execute tool.`
      };
    }
    let searchDirAbs: string;
    try {
      searchDirAbs = this.resolveAndValidatePath(params.path);
      const searchDirDisplay = params.path || '.';
      const matches: GrepMatch[] = await this.performGrepSearch({
        pattern: params.pattern,
        path: searchDirAbs,
        include: params.include,
      });
      if (matches.length === 0) {
        const noMatchMsg = `No matches found for pattern "${params.pattern}" in path "${searchDirDisplay}"${params.include ? ` (filter: "${params.include}")` : ''}.`;
        const noMatchUser = `No matches found`;
        return { llmContent: noMatchMsg, returnDisplay: noMatchUser };
      }
      // Group matches by file (keyed by path relative to the search dir),
      // keeping each file's matches in ascending line-number order.
      const matchesByFile = matches.reduce((acc, match) => {
        const relativeFilePath = path.relative(searchDirAbs, path.resolve(searchDirAbs, match.filePath)) || path.basename(match.filePath);
        if (!acc[relativeFilePath]) {
          acc[relativeFilePath] = [];
        }
        acc[relativeFilePath].push(match);
        acc[relativeFilePath].sort((a, b) => a.lineNumber - b.lineNumber);
        return acc;
      }, {} as Record<string, GrepMatch[]>);
      let llmContent = `Found ${matches.length} match(es) for pattern "${params.pattern}" in path "${searchDirDisplay}"${params.include ? ` (filter: "${params.include}")` : ''}:\n---\n`;
      for (const filePath in matchesByFile) {
        llmContent += `File: ${filePath}\n`;
        matchesByFile[filePath].forEach(match => {
          const trimmedLine = match.line.trim();
          llmContent += `L${match.lineNumber}: ${trimmedLine}\n`;
        });
        llmContent += '---\n';
      }
      // (Fixed typo: was "matche(s)".)
      return { llmContent: llmContent.trim(), returnDisplay: `Found ${matches.length} match(es)` };
    } catch (error) {
      console.error(`Error during GrepTool execution: ${error}`);
      const errorMessage = error instanceof Error ? error.message : String(error);
      return {
        llmContent: `Error during grep search operation: ${errorMessage}`,
        returnDisplay: errorMessage
      };
    }
  }

  // --- Inlined Grep Logic and Helpers ---

  /**
   * Checks if a command is available in the system's PATH.
   * @param command The command name (e.g., 'git', 'grep').
   * @returns True if the command is available, false otherwise.
   */
  private isCommandAvailable(command: string): Promise<boolean> {
    return new Promise((resolve) => {
      // 'where' on Windows; POSIX 'command -v' elsewhere.
      const checkCommand = process.platform === 'win32' ? 'where' : 'command';
      const checkArgs = process.platform === 'win32' ? [command] : ['-v', command];
      try {
        const child = spawn(checkCommand, checkArgs, { stdio: 'ignore', shell: process.platform === 'win32' });
        child.on('close', (code) => resolve(code === 0));
        child.on('error', () => resolve(false));
      } catch (e) {
        resolve(false);
      }
    });
  }

  /**
   * Checks if a directory or any of its ancestors contains a .git entry.
   * @param dirPath Absolute path to the directory to check.
   * @returns True if it's inside a Git repository, false otherwise.
   */
  private async isGitRepository(dirPath: string): Promise<boolean> {
    let currentPath = path.resolve(dirPath);
    const root = path.parse(currentPath).root;
    try {
      while (true) {
        const gitPath = path.join(currentPath, '.git');
        try {
          // Accept both a .git directory (normal repo) and a .git file
          // (worktrees/submodules store a pointer file).
          const stats = await fsPromises.stat(gitPath);
          if (stats.isDirectory() || stats.isFile()) {
            return true;
          }
          // Exists but is some other entry type: treat as not a repo.
          return false;
        } catch (err: any) {
          if (err.code !== 'ENOENT') {
            console.error(`Error checking for .git in ${currentPath}: ${err.message}`);
            return false;
          }
          // ENOENT: keep walking up toward the filesystem root.
        }
        if (currentPath === root) {
          break;
        }
        currentPath = path.dirname(currentPath);
      }
    } catch (err: any) {
      console.error(`Error traversing directory structure upwards from ${dirPath}: ${err instanceof Error ? err.message : String(err)}`);
    }
    return false;
  }

  /**
   * Parses the standard output of grep-like commands (git grep, system grep).
   * Expects format: filePath:lineNumber:lineContent — only the first two
   * colons are treated as separators, so colons inside the line content are
   * preserved. (A colon inside the file path itself is NOT handled.)
   * @param output The raw stdout string.
   * @param basePath The absolute directory the search was run from, for relative paths.
   * @returns Array of match objects.
   */
  private parseGrepOutput(output: string, basePath: string): GrepMatch[] {
    const results: GrepMatch[] = [];
    if (!output) return results;
    // git grep and system grep emit '\n'-separated lines regardless of
    // platform; splitting on os.EOL ('\r\n' on Windows) would fail to split
    // there, so accept either line ending explicitly.
    const lines = output.split(/\r?\n/);
    for (const line of lines) {
      if (!line.trim()) continue;
      // Find the index of the first colon.
      const firstColonIndex = line.indexOf(':');
      if (firstColonIndex === -1) {
        // Malformed line: Does not contain any colon. Skip.
        continue;
      }
      // Find the index of the second colon, searching *after* the first one.
      const secondColonIndex = line.indexOf(':', firstColonIndex + 1);
      if (secondColonIndex === -1) {
        // Malformed line: Contains only one colon (e.g., filename:content). Skip.
        // Grep output with -n should always have file:line:content.
        continue;
      }
      // Extract parts based on the found colon indices
      const filePathRaw = line.substring(0, firstColonIndex);
      const lineNumberStr = line.substring(firstColonIndex + 1, secondColonIndex);
      // The rest of the line, starting after the second colon, is the content.
      const lineContent = line.substring(secondColonIndex + 1);
      const lineNumber = parseInt(lineNumberStr, 10);
      if (!isNaN(lineNumber)) {
        // Resolve the raw path relative to the base path where grep ran
        const absoluteFilePath = path.resolve(basePath, filePathRaw);
        // Make the final path relative to the basePath for consistency
        const relativeFilePath = path.relative(basePath, absoluteFilePath);
        results.push({
          // Use relative path, or just the filename if it's in the base path itself
          filePath: relativeFilePath || path.basename(absoluteFilePath),
          lineNumber: lineNumber,
          line: lineContent, // Use the full extracted line content
        });
      }
      // Silently ignore lines where the line number isn't parsable
    }
    return results;
  }

  /**
   * Gets a description of the grep operation
   * @param params Parameters for the grep operation
   * @returns A string describing the grep
   */
  getDescription(params: GrepToolParams): string {
    let description = `'${params.pattern}'`;
    if (params.include) {
      description += ` in ${params.include}`;
    }
    if (params.path) {
      // (The previous `params.path || this.rootDirectory` fallback was dead
      // code: this branch only runs when params.path is truthy.)
      const relativePath = makeRelative(params.path, this.rootDirectory);
      description += ` within ${shortenPath(relativePath || './')}`;
    }
    return description;
  }

  /**
   * Performs the actual search using the prioritized strategies:
   * git grep (inside git repos) -> system grep -> pure-JS scan.
   * @param options Search options including pattern, absolute path, and include glob.
   * @returns A promise resolving to an array of match objects.
   */
  private async performGrepSearch(options: {
    pattern: string;
    path: string; // Expects absolute path
    include?: string;
  }): Promise<GrepMatch[]> {
    const { pattern, path: absolutePath, include } = options;
    let strategyUsed = 'none'; // Keep track for potential error reporting
    try {
      // --- Strategy 1: git grep ---
      const isGit = await this.isGitRepository(absolutePath);
      const gitAvailable = isGit && await this.isCommandAvailable('git');
      if (gitAvailable) {
        strategyUsed = 'git grep';
        // Pass the pattern via -e so a pattern beginning with '-' is not
        // misparsed as an option.
        const gitArgs = ['grep', '--untracked', '-n', '-E', '--ignore-case', '-e', pattern];
        if (include) {
          gitArgs.push('--', include);
        }
        try {
          const output = await new Promise<string>((resolve, reject) => {
            const child = spawn('git', gitArgs, { cwd: absolutePath, windowsHide: true });
            const stdoutChunks: Buffer[] = [];
            const stderrChunks: Buffer[] = [];
            child.stdout.on('data', (chunk) => { stdoutChunks.push(chunk); });
            child.stderr.on('data', (chunk) => { stderrChunks.push(chunk); });
            child.on('error', (err) => reject(new Error(`Failed to start git grep: ${err.message}`)));
            child.on('close', (code) => {
              const stdoutData = Buffer.concat(stdoutChunks).toString('utf8');
              const stderrData = Buffer.concat(stderrChunks).toString('utf8');
              if (code === 0) resolve(stdoutData);
              else if (code === 1) resolve(''); // Exit code 1 means "no matches", not an error
              else reject(new Error(`git grep exited with code ${code}: ${stderrData}`));
            });
          });
          return this.parseGrepOutput(output, absolutePath);
        } catch (gitError: any) {
          console.error(`GrepTool: git grep strategy failed: ${gitError.message}. Falling back...`);
        }
      }
      // --- Strategy 2: System grep ---
      const grepAvailable = await this.isCommandAvailable('grep');
      if (grepAvailable) {
        strategyUsed = 'system grep';
        const grepArgs = ['-r', '-n', '-H', '-E'];
        const commonExcludes = ['.git', 'node_modules', 'bower_components'];
        commonExcludes.forEach(dir => grepArgs.push(`--exclude-dir=${dir}`));
        if (include) {
          grepArgs.push(`--include=${include}`);
        }
        // As above, use -e so a leading '-' in the pattern is safe.
        grepArgs.push('-e', pattern);
        grepArgs.push('.');
        try {
          const output = await new Promise<string>((resolve, reject) => {
            const child = spawn('grep', grepArgs, { cwd: absolutePath, windowsHide: true });
            const stdoutChunks: Buffer[] = [];
            const stderrChunks: Buffer[] = [];
            child.stdout.on('data', (chunk) => { stdoutChunks.push(chunk); });
            child.stderr.on('data', (chunk) => {
              // Recursive grep routinely complains about unreadable files and
              // directories; those are expected and not surfaced as failures.
              const stderrStr = chunk.toString();
              if (!stderrStr.includes('Permission denied') && !/grep:.*: Is a directory/i.test(stderrStr)) {
                stderrChunks.push(chunk);
              }
            });
            child.on('error', (err) => reject(new Error(`Failed to start system grep: ${err.message}`)));
            child.on('close', (code) => {
              const stdoutData = Buffer.concat(stdoutChunks).toString('utf8');
              const stderrData = Buffer.concat(stderrChunks).toString('utf8').trim();
              if (code === 0) resolve(stdoutData);
              else if (code === 1) resolve(''); // No matches
              else {
                if (stderrData) reject(new Error(`System grep exited with code ${code}: ${stderrData}`));
                else resolve('');
              }
            });
          });
          return this.parseGrepOutput(output, absolutePath);
        } catch (grepError: any) {
          console.error(`GrepTool: System grep strategy failed: ${grepError.message}. Falling back...`);
        }
      }
      // --- Strategy 3: Pure JavaScript Fallback ---
      strategyUsed = 'javascript fallback';
      const globPattern = include ? include : '**/*';
      const ignorePatterns = ['.git', 'node_modules', 'bower_components', '.svn', '.hg'];
      const filesStream = fastGlob.stream(globPattern, {
        cwd: absolutePath,
        dot: true,
        ignore: ignorePatterns,
        absolute: true,
        onlyFiles: true,
        suppressErrors: true,
        stats: false,
      });
      // 'i' flag mirrors the --ignore-case used by the git grep strategy.
      const regex = new RegExp(pattern, 'i');
      const allMatches: GrepMatch[] = [];
      for await (const filePath of filesStream) {
        const fileAbsolutePath = filePath as string;
        try {
          const content = await fsPromises.readFile(fileAbsolutePath, 'utf8');
          const lines = content.split(/\r?\n/);
          lines.forEach((line, index) => {
            if (regex.test(line)) {
              allMatches.push({
                filePath: path.relative(absolutePath, fileAbsolutePath) || path.basename(fileAbsolutePath),
                lineNumber: index + 1,
                line: line,
              });
            }
          });
        } catch (readError: any) {
          // ENOENT can happen when a file vanishes between globbing and reading.
          if (readError.code !== 'ENOENT') {
            console.error(`GrepTool: Could not read or process file ${fileAbsolutePath}: ${readError.message}`);
          }
        }
      }
      return allMatches;
    } catch (error: any) {
      console.error(`GrepTool: Error during performGrepSearch (Strategy: ${strategyUsed}): ${error.message}`);
      throw error; // Re-throw to be caught by the execute method's handler
    }
  }
}

View File

@ -0,0 +1,306 @@
import fs from 'fs';
import path from 'path';
import { BaseTool } from './BaseTool.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { ToolResult } from './ToolResult.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
/**
 * Parameters for the LS tool
 */
export interface LSToolParams {
  /**
   * The absolute path to the directory to list.
   * Must be absolute and fall inside the tool's root directory.
   */
  path: string;
  /**
   * List of glob patterns to ignore (the matcher honors only the
   * '*' and '?' wildcards).
   */
  ignore?: string[];
}
/**
 * File entry returned by LS tool
 */
export interface FileEntry {
  /**
   * Name of the file or directory
   */
  name: string;
  /**
   * Absolute path to the file or directory
   */
  path: string;
  /**
   * Whether this entry is a directory
   */
  isDirectory: boolean;
  /**
   * Size of the file in bytes (0 for directories)
   */
  size: number;
  /**
   * Last modified timestamp
   */
  modifiedTime: Date;
}
/**
 * Result from the LS tool
 */
export interface LSToolResult extends ToolResult {
  /**
   * List of file entries, sorted directories-first then alphabetically
   */
  entries: FileEntry[];
  /**
   * The directory that was listed
   */
  listedPath: string;
  /**
   * Total number of entries found (after ignore-pattern filtering)
   */
  totalEntries: number;
}
/**
 * Implementation of the LS tool that lists directory contents
 */
export class LSTool extends BaseTool<LSToolParams, LSToolResult> {
  /**
   * The root directory that this tool is grounded in.
   * All path operations will be restricted to this directory.
   */
  private rootDirectory: string;

  /**
   * Creates a new instance of the LSTool
   * @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
   */
  constructor(rootDirectory: string) {
    super(
      'list_directory',
      'ReadFolder',
      'Lists the names of files and subdirectories directly within a specified directory path. Can optionally ignore entries matching provided glob patterns.',
      {
        properties: {
          path: {
            description: 'The absolute path to the directory to list (must be absolute, not relative)',
            type: 'string'
          },
          ignore: {
            description: 'List of glob patterns to ignore',
            items: {
              type: 'string'
            },
            type: 'array'
          }
        },
        required: ['path'],
        type: 'object'
      }
    );
    // Ensure the root is absolute and normalized
    this.rootDirectory = path.resolve(rootDirectory);
  }

  /**
   * Checks if a path is within the root directory
   * @param pathToCheck The path to check
   * @returns True if the path is within the root directory, false otherwise
   */
  private isWithinRoot(pathToCheck: string): boolean {
    const normalizedPath = path.normalize(pathToCheck);
    const normalizedRoot = path.normalize(this.rootDirectory);
    // Ensure the root ends with a separator so "/root-other" is not treated
    // as inside "/root".
    const rootWithSep = normalizedRoot.endsWith(path.sep)
      ? normalizedRoot
      : normalizedRoot + path.sep;
    return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
  }

  /**
   * Validates the parameters for the tool
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  invalidParams(params: LSToolParams): string | null {
    if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
      return "Parameters failed schema validation.";
    }
    // Ensure path is absolute
    if (!path.isAbsolute(params.path)) {
      return `Path must be absolute: ${params.path}`;
    }
    // Ensure path is within the root directory
    if (!this.isWithinRoot(params.path)) {
      return `Path must be within the root directory (${this.rootDirectory}): ${params.path}`;
    }
    return null;
  }

  /**
   * Compiles the ignore glob patterns ('*' and '?' wildcards only) into
   * anchored regular expressions. Compiling once per listing avoids
   * rebuilding the same regexes for every directory entry.
   * @param patterns Glob patterns to compile (may be undefined or empty)
   * @returns Compiled matchers (empty array when there is nothing to ignore)
   */
  private compileIgnorePatterns(patterns?: string[]): RegExp[] {
    if (!patterns || patterns.length === 0) {
      return [];
    }
    return patterns.map(pattern => {
      // Escape regex metacharacters, then translate glob wildcards.
      const regexPattern = pattern
        .replace(/[.+^${}()|[\]\\]/g, '\\$&')
        .replace(/\*/g, '.*')
        .replace(/\?/g, '.');
      return new RegExp(`^${regexPattern}$`);
    });
  }

  /**
   * Gets a description of the listing operation
   * @param params Parameters for the listing
   * @returns A string describing the directory being listed
   */
  getDescription(params: LSToolParams): string {
    const relativePath = makeRelative(params.path, this.rootDirectory);
    return shortenPath(relativePath);
  }

  /**
   * Executes the LS operation with the given parameters
   * @param params Parameters for the LS operation
   * @returns Result of the LS operation
   */
  async execute(params: LSToolParams): Promise<LSToolResult> {
    const validationError = this.invalidParams(params);
    if (validationError) {
      return {
        entries: [],
        listedPath: params.path,
        totalEntries: 0,
        llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
        returnDisplay: "**Error:** Failed to execute tool."
      };
    }
    try {
      // Stat once instead of existsSync + statSync: one less syscall and no
      // check-then-use race if the directory disappears in between.
      let stats: fs.Stats;
      try {
        stats = fs.statSync(params.path);
      } catch (statError: any) {
        if (statError?.code === 'ENOENT') {
          return {
            entries: [],
            listedPath: params.path,
            totalEntries: 0,
            llmContent: `Directory does not exist: ${params.path}`,
            returnDisplay: `Directory does not exist`
          };
        }
        throw statError; // Other failures are handled by the outer catch below
      }
      if (!stats.isDirectory()) {
        return {
          entries: [],
          listedPath: params.path,
          totalEntries: 0,
          llmContent: `Path is not a directory: ${params.path}`,
          returnDisplay: `Path is not a directory`
        };
      }
      // Read directory contents
      const files = fs.readdirSync(params.path);
      if (files.length === 0) {
        return {
          entries: [],
          listedPath: params.path,
          totalEntries: 0,
          llmContent: `Directory is empty: ${params.path}`,
          returnDisplay: `Directory is empty.`
        };
      }
      // Compile the ignore patterns once for the whole listing.
      const ignoreMatchers = this.compileIgnorePatterns(params.ignore);
      const entries: FileEntry[] = [];
      // Process each entry
      for (const file of files) {
        // Skip entries matching any ignore pattern
        if (ignoreMatchers.some(regex => regex.test(file))) {
          continue;
        }
        const fullPath = path.join(params.path, file);
        try {
          const entryStats = fs.statSync(fullPath);
          const isDir = entryStats.isDirectory();
          entries.push({
            name: file,
            path: fullPath,
            isDirectory: isDir,
            size: isDir ? 0 : entryStats.size,
            modifiedTime: entryStats.mtime
          });
        } catch (error) {
          // Skip entries that can't be accessed (broken symlinks, permissions)
          console.error(`Error accessing ${fullPath}: ${error}`);
        }
      }
      // Sort entries (directories first, then alphabetically)
      entries.sort((a, b) => {
        if (a.isDirectory && !b.isDirectory) return -1;
        if (!a.isDirectory && b.isDirectory) return 1;
        return a.name.localeCompare(b.name);
      });
      // Create formatted content for display ('d' marks directories)
      const directoryContent = entries.map(entry => {
        const typeIndicator = entry.isDirectory ? 'd' : '-';
        const sizeInfo = entry.isDirectory ? '' : ` (${entry.size} bytes)`;
        return `${typeIndicator} ${entry.name}${sizeInfo}`;
      }).join('\n');
      return {
        entries,
        listedPath: params.path,
        totalEntries: entries.length,
        llmContent: `Directory listing for ${params.path}:\n${directoryContent}`,
        returnDisplay: `Found ${entries.length} item(s).`
      };
    } catch (error) {
      const errorMessage = `Error listing directory: ${error instanceof Error ? error.message : String(error)}`;
      return {
        entries: [],
        listedPath: params.path,
        totalEntries: 0,
        llmContent: errorMessage,
        returnDisplay: `**Error:** ${errorMessage}`
      };
    }
  }
}

View File

@ -0,0 +1,296 @@
import fs from 'fs';
import path from 'path';
import { ToolResult } from './ToolResult.js';
import { BaseTool } from './BaseTool.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
/**
 * Parameters for the ReadFile tool
 */
export interface ReadFileToolParams {
  /**
   * The absolute path to the file to read
   */
  file_path: string;
  /**
   * The 0-based line number to start reading from (optional)
   */
  offset?: number;
  /**
   * The maximum number of lines to read (optional; the tool applies its
   * internal default cap when omitted)
   */
  limit?: number;
}
/**
 * Standardized result from the ReadFile tool
 */
export interface ReadFileToolResult extends ToolResult {
  // NOTE(review): adds no fields beyond ToolResult; presumably kept as a
  // distinct type so read-specific fields can be added later.
}
/**
 * Implementation of the ReadFile tool that reads files from the filesystem
 */
export class ReadFileTool extends BaseTool<ReadFileToolParams, ReadFileToolResult> {
  public static readonly Name: string = 'read_file';

  // Maximum number of lines to read when no explicit limit is provided
  private static readonly DEFAULT_MAX_LINES = 2000;
  // Maximum length of a line before truncating
  private static readonly MAX_LINE_LENGTH = 2000;

  /**
   * The root directory that this tool is grounded in.
   * All file operations will be restricted to this directory.
   */
  private rootDirectory: string;

  /**
   * Creates a new instance of the ReadFileTool
   * @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
   */
  constructor(rootDirectory: string) {
    super(
      ReadFileTool.Name,
      'ReadFile',
      'Reads and returns the content of a specified file from the local filesystem. Handles large files by allowing reading specific line ranges.',
      {
        properties: {
          file_path: {
            description: 'The absolute path to the file to read (e.g., \'/home/user/project/file.txt\'). Relative paths are not supported.',
            type: 'string'
          },
          offset: {
            description: 'Optional: The 0-based line number to start reading from. Requires \'limit\' to be set. Use for paginating through large files.',
            type: 'number'
          },
          limit: {
            description: 'Optional: Maximum number of lines to read. Use with \'offset\' to paginate through large files. If omitted, reads the entire file (if feasible).',
            type: 'number'
          }
        },
        required: ['file_path'],
        type: 'object'
      }
    );
    // Ensure the root is absolute and normalized
    this.rootDirectory = path.resolve(rootDirectory);
  }

  /**
   * Checks if a path is within the root directory
   * @param pathToCheck The path to check
   * @returns True if the path is within the root directory, false otherwise
   */
  private isWithinRoot(pathToCheck: string): boolean {
    const normalizedPath = path.normalize(pathToCheck);
    const normalizedRoot = path.normalize(this.rootDirectory);
    // Ensure the root ends with a separator so "/root-other" is not treated
    // as inside "/root".
    const rootWithSep = normalizedRoot.endsWith(path.sep)
      ? normalizedRoot
      : normalizedRoot + path.sep;
    return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
  }

  /**
   * Validates the parameters for the ReadFile tool
   * @param params Parameters to validate
   * @returns An error message string if invalid, null otherwise
   */
  invalidParams(params: ReadFileToolParams): string | null {
    if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
      return "Parameters failed schema validation.";
    }
    // Ensure path is absolute
    if (!path.isAbsolute(params.file_path)) {
      return `File path must be absolute: ${params.file_path}`;
    }
    // Ensure path is within the root directory
    if (!this.isWithinRoot(params.file_path)) {
      return `File path must be within the root directory (${this.rootDirectory}): ${params.file_path}`;
    }
    // Validate offset and limit if provided
    if (params.offset !== undefined && params.offset < 0) {
      return 'Offset must be a non-negative number';
    }
    if (params.limit !== undefined && params.limit <= 0) {
      return 'Limit must be a positive number';
    }
    return null;
  }

  /**
   * Determines if a file is likely binary based on content sampling.
   * Reads up to the first 4KB and looks for null bytes or a high
   * concentration of non-printable characters.
   * @param filePath Path to the file
   * @returns True if the file appears to be binary
   */
  private isBinaryFile(filePath: string): boolean {
    let fd: number | undefined;
    try {
      fd = fs.openSync(filePath, 'r');
      const buffer = Buffer.alloc(4096);
      const bytesRead = fs.readSync(fd, buffer, 0, 4096, 0);
      // An empty sample has nothing to inspect; treat it as text. This also
      // avoids 0/0 = NaN in the ratio computation below.
      if (bytesRead === 0) {
        return false;
      }
      let nonPrintableCount = 0;
      for (let i = 0; i < bytesRead; i++) {
        // Null byte is a strong indicator of binary data
        if (buffer[i] === 0) {
          return true;
        }
        // Count non-printable characters (below tab, or between CR and space)
        if (buffer[i] < 9 || (buffer[i] > 13 && buffer[i] < 32)) {
          nonPrintableCount++;
        }
      }
      // If more than 30% are non-printable, likely binary
      return (nonPrintableCount / bytesRead) > 0.3;
    } catch (error) {
      // Unreadable files are treated as text here; the actual read in
      // execute() will surface the real error.
      return false;
    } finally {
      // Always release the descriptor (the original leaked it when readSync threw).
      if (fd !== undefined) {
        try {
          fs.closeSync(fd);
        } catch {
          /* ignore close errors */
        }
      }
    }
  }

  /**
   * Detects the type of file based on extension and content
   * @param filePath Path to the file
   * @returns File type description ('image', 'binary', or 'text')
   */
  private detectFileType(filePath: string): string {
    const ext = path.extname(filePath).toLowerCase();
    // Common image formats
    if (['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg'].includes(ext)) {
      return 'image';
    }
    // Other known binary formats
    if (['.pdf', '.zip', '.tar', '.gz', '.exe', '.dll', '.so'].includes(ext)) {
      return 'binary';
    }
    // Check content for binary indicators
    if (this.isBinaryFile(filePath)) {
      return 'binary';
    }
    return 'text';
  }

  /**
   * Gets a description of the file reading operation
   * @param params Parameters for the file reading
   * @returns A string describing the file being read
   */
  getDescription(params: ReadFileToolParams): string {
    const relativePath = makeRelative(params.file_path, this.rootDirectory);
    return shortenPath(relativePath);
  }

  /**
   * Reads a file and returns its contents, optionally windowed by
   * offset/limit and with overly long lines truncated.
   * (Note: output lines are NOT prefixed with line numbers.)
   * @param params Parameters for the file reading
   * @returns Result with file contents
   */
  async execute(params: ReadFileToolParams): Promise<ReadFileToolResult> {
    const validationError = this.invalidParams(params);
    if (validationError) {
      return {
        llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
        returnDisplay: "**Error:** Failed to execute tool."
      };
    }
    try {
      // Check if file exists
      if (!fs.existsSync(params.file_path)) {
        return {
          llmContent: `File not found: ${params.file_path}`,
          returnDisplay: `File not found.`,
        };
      }
      // Check if it's a directory
      const stats = fs.statSync(params.file_path);
      if (stats.isDirectory()) {
        return {
          llmContent: `Path is a directory, not a file: ${params.file_path}`,
          returnDisplay: `File is directory.`,
        };
      }
      // Detect file type; binary/image files are not read into the context
      const fileType = this.detectFileType(params.file_path);
      if (fileType !== 'text') {
        return {
          llmContent: `Binary file: ${params.file_path} (${fileType})`,
          returnDisplay: ``,
        };
      }
      // Read and process text file
      const content = fs.readFileSync(params.file_path, 'utf8');
      const lines = content.split('\n');
      // Apply offset and limit
      const startLine = params.offset || 0;
      // Clamp to the file length so the truncation message below can never
      // report a range extending past the end of the file (the original
      // skipped the clamp when an explicit limit was given).
      const endLine = Math.min(
        startLine + (params.limit ?? ReadFileTool.DEFAULT_MAX_LINES),
        lines.length
      );
      const selectedLines = lines.slice(startLine, endLine);
      // Truncate overly long lines
      let truncated = false;
      const formattedLines = selectedLines.map((line) => {
        if (line.length > ReadFileTool.MAX_LINE_LENGTH) {
          truncated = true;
          return line.substring(0, ReadFileTool.MAX_LINE_LENGTH) + '... [truncated]';
        }
        return line;
      });
      // Content is truncated if lines were cut off the end or any line was shortened
      const contentTruncated = (endLine < lines.length) || truncated;
      // Create llmContent with truncation info if needed
      let llmContent = '';
      if (contentTruncated) {
        llmContent += `[File truncated: showing lines ${startLine + 1}-${endLine} of ${lines.length} total lines. Use offset parameter to view more.]\n`;
      }
      llmContent += formattedLines.join('\n');
      return {
        llmContent,
        returnDisplay: '',
      };
    } catch (error) {
      const errorMsg = `Error reading file: ${error instanceof Error ? error.message : String(error)}`;
      return {
        llmContent: `Error reading file ${params.file_path}: ${errorMsg}`,
        returnDisplay: `Failed to read file: ${errorMsg}`,
      };
    }
  }
}

View File

@ -0,0 +1,960 @@
import { spawn, SpawnOptions, ChildProcessWithoutNullStreams, exec } from 'child_process'; // Added 'exec'
import path from 'path';
import os from 'os';
import crypto from 'crypto';
import { promises as fs } from 'fs'; // Added fs.promises
import { BaseTool } from './BaseTool.js'; // Adjust path as needed
import { ToolResult } from './ToolResult.js'; // Adjust path as needed
import { SchemaValidator } from '../utils/schemaValidator.js'; // Adjust path as needed
import { ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolExecuteConfirmationDetails } from '../ui/types.js'; // Adjust path as needed
import { GeminiClient } from '../core/GeminiClient.js';
import { SchemaUnion, Type } from '@google/genai';
import { BackgroundTerminalAnalyzer } from '../utils/BackgroundTerminalAnalyzer.js';
// --- Interfaces ---
/** Input parameters accepted by the terminal tool. */
export interface TerminalToolParams {
/** The exact bash command (or ';'/'&&' chain) to execute. */
command: string;
/** Optional user-facing explanation of the command, used for logs and confirmation prompts. */
description?: string;
/** Optional per-call time limit in milliseconds; applies to foreground commands only. */
timeout?: number;
/** When true, launch the command with '&' and poll its status in the background. */
runInBackground?: boolean;
}
/** Result payload returned by the terminal tool; currently identical in shape to ToolResult. */
export interface TerminalToolResult extends ToolResult {
// Add specific fields if needed for structured output from polling/LLM
// finalStdout?: string;
// finalStderr?: string;
// llmAnalysis?: string;
}
// --- Constants ---
const MAX_OUTPUT_LENGTH = 10000; // Default max output length (chars) before truncation
// NOTE(review): DEFAULT_TIMEOUT_MS (30 min) exceeds MAX_TIMEOUT_OVERRIDE_MS (10 min).
// Because effective timeouts are computed with Math.min(timeout, MAX_TIMEOUT_OVERRIDE_MS),
// the practical foreground default is 10 minutes, not 30 — confirm which value is intended.
const DEFAULT_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes (for foreground commands)
const MAX_TIMEOUT_OVERRIDE_MS = 10 * 60 * 1000; // 10 minutes (max override for foreground)
const BACKGROUND_LAUNCH_TIMEOUT_MS = 15 * 1000; // 15 seconds timeout for *launching* background tasks
const BACKGROUND_POLL_INTERVAL_MS = 5000; // 5 seconds interval for checking background process status
const BACKGROUND_POLL_TIMEOUT_MS = 30000; // 30 seconds total polling time for background process status
// Command roots rejected by parameter validation; each token of a command is
// reduced to its basename (lowercased) and checked against this list.
const BANNED_COMMAND_ROOTS = [
  // Session/flow control (excluding cd)
  'alias', 'bg', 'command', 'declare', 'dirs', 'disown', 'enable', 'eval', 'exec',
  'exit', 'export', 'fc', 'fg', 'getopts', 'hash', 'history', 'jobs', 'kill', 'let',
  'local', 'logout', 'popd', 'printf', 'pushd', /* 'pwd' is safe */ 'read', 'readonly', 'set',
  'shift', 'shopt', 'source', 'suspend', 'test', 'times', 'trap', 'type', 'typeset',
  'ulimit', 'umask', 'unalias', 'unset', 'wait',
  // Network commands ('ftp' was previously listed twice; deduplicated)
  'curl', 'wget', 'nc', 'telnet', 'ssh', 'scp', 'ftp', 'sftp',
  'http', 'https', 'rsync',
  // Browsers/GUI launchers
  'lynx', 'w3m', 'links', 'elinks', 'httpie', 'xh', 'http-prompt',
  'chrome', 'firefox', 'safari', 'edge', 'xdg-open', 'open'
];
// --- Helper Type for Command Queue ---
/** A command awaiting its turn on the persistent shell, plus its completion callbacks. */
interface QueuedCommand {
/** Parameters of the queued command. */
params: TerminalToolParams;
/** Settles the caller's promise with the command's result. */
resolve: (result: TerminalToolResult) => void;
/** Invoked on internal failure (callers may map this to an error-shaped result). */
reject: (error: Error) => void;
confirmationDetails: ToolExecuteConfirmationDetails | false; // Kept for potential future use
}
/**
 * Implementation of the terminal tool that executes shell commands within a persistent session.
 */
export class TerminalTool extends BaseTool<TerminalToolParams, TerminalToolResult> {
// Tool name exposed to the model for function calling.
public static Name: string = 'execute_bash_command';
// Project root the shell session is anchored to (resolved to an absolute path in the constructor).
private readonly rootDirectory: string;
// Max characters of command output retained before truncation.
private readonly outputLimit: number;
// The long-lived bash process; null when not running (e.g. after a crash, before re-init).
private bashProcess: ChildProcessWithoutNullStreams | null = null;
// Shell CWD as tracked by this tool; updated after successful foreground commands / 'cd'.
private currentCwd: string;
// True while a command is in flight; enforces one-at-a-time execution.
private isExecuting: boolean = false;
// Commands waiting for the shell to become free.
private commandQueue: QueuedCommand[] = [];
// Cleanup callback for the command currently attached to the shell's streams.
private currentCommandCleanup: (() => void) | null = null;
private shouldAlwaysExecuteCommands: Map<string, boolean> = new Map(); // Track confirmation per root command
// Resolves once the shell is spawned and considered responsive; recreated on restart.
private shellReady: Promise<void>;
private resolveShellReady: (() => void) | undefined; // Settler for shellReady (assigned inside the promise executor)
private rejectShellReady: ((reason?: any) => void) | undefined; // Rejecter for shellReady (assigned inside the promise executor)
private readonly backgroundTerminalAnalyzer: BackgroundTerminalAnalyzer;
/**
 * Creates the terminal tool.
 *
 * Builds the LLM-facing description and parameter schema (both derived from the
 * module constants and the given root directory), registers the tool with the
 * base class, then starts the persistent bash session.
 *
 * @param rootDirectory Directory the shell session starts in; also embedded in the tool description.
 * @param outputLimit Maximum number of characters of command output to keep before truncation.
 */
constructor(rootDirectory: string, outputLimit: number = MAX_OUTPUT_LENGTH) {
const toolDisplayName = 'Terminal';
// --- LLM-Facing Description ---
// Updated description for background tasks to mention polling and LLM analysis
const toolDescription = `Executes one or more bash commands sequentially in a secure and persistent interactive shell session. Can run commands in the foreground (waiting for completion) or background (returning after launch, with subsequent status polling).
Core Functionality:
* Starts in project root: '${path.basename(rootDirectory)}'. Current Directory starts as: ${rootDirectory} (will update based on 'cd' commands).
* Persistent State: Environment variables and the current working directory (\`pwd\`) persist between calls to this tool.
* **Execution Modes:**
* **Foreground (default):** Waits for the command to complete. Captures stdout, stderr, and exit code. Output is truncated if it exceeds ${outputLimit} characters.
* **Background (\`runInBackground: true\`):** Appends \`&\` to the command and redirects its output to temporary files. Returns *after* the command is launched, providing the Process ID (PID) and launch status. Subsequently, the tool **polls** for the background process status for up to ${BACKGROUND_POLL_TIMEOUT_MS / 1000} seconds. Once the process finishes or polling times out, the tool reads the captured stdout/stderr from the temporary files, runs an internal LLM analysis on the output, cleans up the files, and returns the final status, captured output, and analysis.
* Timeout: Optional timeout per 'execute' call (default: ${DEFAULT_TIMEOUT_MS / 60000} min, max override: ${MAX_TIMEOUT_OVERRIDE_MS / 60000} min for foreground). Background *launch* has a fixed shorter timeout (${BACKGROUND_LAUNCH_TIMEOUT_MS / 1000}s) for the launch attempt itself. Background *polling* has its own timeout (${BACKGROUND_POLL_TIMEOUT_MS / 1000}s). Timeout attempts SIGINT for foreground commands.
Usage Guidance & Restrictions:
1. **Directory/File Verification (IMPORTANT):**
* BEFORE executing commands that create files or directories (e.g., \`mkdir foo/bar\`, \`touch new/file.txt\`, \`git clone ...\`), use the dedicated File System tool (e.g., 'list_directory') to verify the target parent directory exists and is the correct location.
* Example: Before running \`mkdir foo/bar\`, first use the File System tool to check that \`foo\` exists in the current directory (\`${rootDirectory}\` initially, check current CWD if it changed).
2. **Use Specialized Tools (CRITICAL):**
* Do NOT use this tool for filesystem searching (\`find\`, \`grep\`). Use the dedicated Search tool instead.
* Do NOT use this tool for reading files (\`cat\`, \`head\`, \`tail\`, \`less\`, \`more\`). Use the dedicated File Reader tool instead.
* Do NOT use this tool for listing files (\`ls\`). Use the dedicated File System tool ('list_directory') instead. Relying on this tool's output for directory structure is unreliable due to potential truncation and lack of structured data.
3. **Security & Banned Commands:**
* Certain commands are banned for security (e.g., network: ${BANNED_COMMAND_ROOTS.filter(c => ['curl', 'wget', 'ssh'].includes(c)).join(', ')}; session: ${BANNED_COMMAND_ROOTS.filter(c => ['exit', 'export', 'kill'].includes(c)).join(', ')}; etc.). The full list is extensive.
* If you attempt a banned command, this tool will return an error explaining the restriction. You MUST relay this error clearly to the user.
4. **Command Execution Notes:**
* Chain multiple commands using shell operators like ';' or '&&'. Do NOT use newlines within the 'command' parameter string itself (newlines are fine inside quoted arguments).
* The shell's current working directory is tracked internally. While \`cd\` is permitted if the user explicitly asks or it's necessary for a workflow, **strongly prefer** using absolute paths or paths relative to the *known* current working directory to avoid errors. Check the '(Executed in: ...)' part of the previous command's output for the CWD.
* Good example (if CWD is /workspace/project): \`pytest tests/unit\` or \`ls /workspace/project/data\`
* Less preferred: \`cd tests && pytest unit\` (only use if necessary or requested)
5. **Background Tasks (\`runInBackground: true\`):**
* Use this for commands that are intended to run continuously (e.g., \`node server.js\`, \`npm start\`).
* The tool initially returns success if the process *launches* successfully, along with its PID.
* **Polling & Final Result:** The tool then monitors the process. The *final* result (delivered after polling completes or times out) will include:
* The final status (completed or timed out).
* The complete stdout and stderr captured in temporary files (truncated if necessary).
* An LLM-generated analysis/summary of the output.
* The initial exit code (usually 0) signifies successful *launching*; the final status indicates completion or timeout after polling.
Use this tool for running build steps (\`npm install\`, \`make\`), linters (\`eslint .\`), test runners (\`pytest\`, \`jest\`), code formatters (\`prettier --write .\`), package managers (\`pip install\`), version control operations (\`git status\`, \`git diff\`), starting background servers/services (\`node server.js --runInBackground true\`), or other safe, standard command-line operations within the project workspace.`;
// --- Parameter Schema ---
// JSON-schema-style description of the tool's parameters, surfaced to the model.
const toolParameterSchema = {
type: 'object',
properties: {
command: {
description: `The exact bash command or sequence of commands (using ';' or '&&') to execute. Must adhere to usage guidelines. Example: 'npm install && npm run build'`,
type: 'string'
},
description: {
description: `Optional: A brief, user-centric explanation of what the command does and why it's being run. Used for logging and confirmation prompts. Example: 'Install project dependencies'`,
type: 'string'
},
timeout: {
description: `Optional execution time limit in milliseconds for FOREGROUND commands. Max ${MAX_TIMEOUT_OVERRIDE_MS}ms (${MAX_TIMEOUT_OVERRIDE_MS / 60000} min). Defaults to ${DEFAULT_TIMEOUT_MS}ms (${DEFAULT_TIMEOUT_MS / 60000} min) if not specified or invalid. Ignored if 'runInBackground' is true.`,
type: 'number'
},
runInBackground: {
description: `If true, execute the command in the background using '&'. Defaults to false. Use for servers or long tasks.`,
type: 'boolean',
}
},
required: ['command']
};
super(
TerminalTool.Name,
toolDisplayName,
toolDescription,
toolParameterSchema
);
// Anchor the shell to an absolute root and point the tracked CWD at it.
this.rootDirectory = path.resolve(rootDirectory);
this.currentCwd = this.rootDirectory;
this.outputLimit = outputLimit;
// shellReady settles once initializeShell() deems the spawned shell responsive.
this.shellReady = new Promise((resolve, reject) => {
this.resolveShellReady = resolve;
this.rejectShellReady = reject;
});
this.backgroundTerminalAnalyzer = new BackgroundTerminalAnalyzer();
this.initializeShell();
}
// --- Shell Initialization and Management (largely unchanged) ---
/**
 * Spawns (or re-spawns) the persistent bash process and wires up its lifecycle.
 *
 * Kills any existing process first; installs 'error' and 'close' handlers that
 * cancel queued commands, reset the shellReady promise, and schedule a retry;
 * then marks the shell ready after a short delay if the process is still alive.
 * NOTE(review): readiness is time-based (1s), not an actual responsiveness probe.
 */
private initializeShell() {
if (this.bashProcess) {
try {
this.bashProcess.kill();
} catch (e) { /* Ignore */ }
}
const spawnOptions: SpawnOptions = {
cwd: this.rootDirectory,
shell: true,
env: { ...process.env },
stdio: ['pipe', 'pipe', 'pipe']
};
try {
const bashPath = os.platform() === 'win32' ? 'bash.exe' : 'bash';
this.bashProcess = spawn(bashPath, ['-s'], spawnOptions) as ChildProcessWithoutNullStreams;
this.currentCwd = this.rootDirectory; // Reset CWD on restart
this.bashProcess.on('error', (err) => {
console.error('Persistent Bash Error:', err);
this.rejectShellReady?.(err); // Use optional chaining as reject might be cleared
this.bashProcess = null;
this.isExecuting = false;
this.clearQueue(new Error(`Persistent bash process failed to start: ${err.message}`));
});
this.bashProcess.on('close', (code, signal) => {
this.bashProcess = null;
this.isExecuting = false;
// Only reject if it hasn't been resolved/rejected already
this.rejectShellReady?.(new Error(`Persistent bash process exited (code: ${code}, signal: ${signal})`));
// Reset shell readiness promise for reinitialization attempts
this.shellReady = new Promise((resolve, reject) => {
this.resolveShellReady = resolve;
this.rejectShellReady = reject;
});
this.clearQueue(new Error(`Persistent bash process exited unexpectedly (code: ${code}, signal: ${signal}). State is lost. Queued commands cancelled.`));
// Attempt to reinitialize after a short delay
setTimeout(() => this.initializeShell(), 1000);
});
// Readiness check - ensure shell is responsive
// Slightly longer timeout to allow shell init
setTimeout(() => {
if (this.bashProcess && !this.bashProcess.killed) {
this.resolveShellReady?.(); // Use optional chaining
} else if (!this.bashProcess) {
// Error likely already handled by 'error' or 'close' event
} else {
// Process was killed during init?
this.rejectShellReady?.(new Error("Shell killed during initialization"));
}
}, 1000); // Increase readiness check timeout slightly
} catch (error: any) {
console.error("Failed to spawn persistent bash:", error);
this.rejectShellReady?.(error); // Use optional chaining
this.bashProcess = null;
this.clearQueue(new Error(`Failed to spawn persistent bash: ${error.message}`));
}
}
// --- Parameter Validation (unchanged) ---
/**
 * Validates a command invocation against the parameter schema and the security rules.
 *
 * @param params The proposed command invocation
 * @returns A human-readable rejection reason, or null when the parameters are acceptable
 */
invalidParams(params: TerminalToolParams): string | null {
  if (!SchemaValidator.validate(this.parameterSchema as Record<string, unknown>, params)) {
    return `Parameters failed schema validation.`;
  }
  const commandOriginal = params.command.trim();
  if (!commandOriginal) {
    return "Command cannot be empty.";
  }
  // Split on whitespace and shell chaining operators so every sub-command is inspected.
  // (Removed an unused `commandLower` local that was computed but never read.)
  const commandParts = commandOriginal.split(/[\s;&&|]+/);
  for (const part of commandParts) {
    if (!part) continue;
    // Strip leading special chars, then take the basename, so './curl' or
    // '/usr/bin/ssh' is still matched against the banned list.
    const cleanPart = part.replace(/^[^a-zA-Z0-9]+/, '').split(/[\/\\]/).pop() || part.replace(/^[^a-zA-Z0-9]+/, '');
    if (cleanPart && BANNED_COMMAND_ROOTS.includes(cleanPart.toLowerCase())) {
      return `Command contains a banned keyword: '${cleanPart}'. Banned list includes network tools, session control, etc.`;
    }
  }
  if (params.timeout !== undefined && (typeof params.timeout !== 'number' || params.timeout <= 0)) {
    return 'Timeout must be a positive number of milliseconds.';
  }
  // Relax the absolute path restriction slightly if needed, but generally good practice
  // const firstCommandPart = commandParts[0];
  // if (firstCommandPart && (firstCommandPart.startsWith('/') || firstCommandPart.startsWith('\\'))) {
  //   return 'Executing commands via absolute paths (starting with \'/\' or \'\\\') is restricted. Use commands available in PATH or relative paths.';
  // }
  return null; // Parameters are valid
}
// --- Description and Confirmation (unchanged) ---
/** Returns the user-supplied description when present (non-empty), otherwise the raw command. */
getDescription(params: TerminalToolParams): string {
  const { description, command } = params;
  return description ? description : command;
}
/**
 * Decides whether the user must confirm this command before it runs.
 *
 * Returns false when the command's root has already been approved via
 * "Proceed Always"; otherwise returns the details needed to render a
 * confirmation prompt (and a callback that records an always-allow choice).
 */
async shouldConfirmExecute(params: TerminalToolParams): Promise<ToolCallConfirmationDetails | false> {
  const firstToken = params.command.trim().split(/[\s;&&|]+/)[0];
  const rootCommand = firstToken?.split(/[\/\\]/).pop() || 'unknown';
  if (this.shouldAlwaysExecuteCommands.get(rootCommand)) {
    return false;
  }
  const details: ToolExecuteConfirmationDetails = {
    title: 'Confirm Shell Command',
    command: params.command,
    rootCommand: rootCommand,
    description: `Execute in '${this.currentCwd}':\n${this.getDescription(params)}`,
    onConfirm: async (outcome: ToolConfirmationOutcome) => {
      // Persist "always allow" decisions keyed by the root command.
      if (outcome === ToolConfirmationOutcome.ProceedAlways) {
        this.shouldAlwaysExecuteCommands.set(rootCommand, true);
      }
    },
  };
  return details;
}
// --- Command Execution and Queueing (unchanged structure) ---
/**
 * Validates the command, then enqueues it for sequential execution on the
 * persistent shell. The returned promise settles when the command finishes
 * (or, for background launches, when post-launch polling completes).
 */
async execute(params: TerminalToolParams): Promise<TerminalToolResult> {
  const validationError = this.invalidParams(params);
  if (validationError) {
    return {
      llmContent: `Command rejected: ${params.command}\nReason: ${validationError}`,
      returnDisplay: `Error: ${validationError}`,
    };
  }
  // Confirmation is assumed to have been handled by the caller before execute().
  return new Promise((resolve) => {
    // Internal failures surface as a resolved error-shaped result, never a rejection.
    const entry: QueuedCommand = {
      params,
      resolve,
      reject: (error) => resolve({
        llmContent: `Internal tool error for command: ${params.command}\nError: ${error.message}`,
        returnDisplay: `Internal Tool Error: ${error.message}`
      }),
      confirmationDetails: false // Placeholder
    };
    this.commandQueue.push(entry);
    // Kick off queue processing only after the entry has been enqueued.
    setImmediate(() => this.triggerQueueProcessing());
  });
}
/**
 * Drains the command queue one item at a time. Only a single command runs at
 * once; whether it succeeds or fails, the next drain pass is scheduled from
 * the finally block.
 */
private async triggerQueueProcessing(): Promise<void> {
  if (this.isExecuting || this.commandQueue.length === 0) {
    return;
  }
  this.isExecuting = true;
  const next = this.commandQueue.shift()!;
  try {
    // Wait for the shell to be ready (or reinitialized after a crash).
    await this.shellReady;
    if (!this.bashProcess || this.bashProcess.killed) {
      throw new Error("Persistent bash process is not available or was killed.");
    }
    // Core execution on the persistent shell.
    const result = await this.executeCommandInShell(next.params);
    next.resolve(result);
  } catch (error: any) {
    console.error(`Error executing command "${next.params.command}":`, error);
    next.reject(error);
  } finally {
    this.isExecuting = false;
    // setImmediate avoids deep recursion when draining many queued commands.
    setImmediate(() => this.triggerQueueProcessing());
  }
}
// --- **** MODIFIED: Core Command Execution Logic **** ---
private executeCommandInShell(params: TerminalToolParams): Promise<TerminalToolResult> {
// Define temp file paths here to be accessible throughout
let tempStdoutPath: string | null = null;
let tempStderrPath: string | null = null;
let originalResolve: (value: TerminalToolResult | PromiseLike<TerminalToolResult>) => void; // To pass to polling
let originalReject: (reason?: any) => void;
const promise = new Promise<TerminalToolResult>((resolve, reject) => {
originalResolve = resolve; // Assign outer scope resolve
originalReject = reject; // Assign outer scope reject
if (!this.bashProcess) {
return reject(new Error("Bash process is not running. Cannot execute command."));
}
const isBackgroundTask = params.runInBackground ?? false;
const commandUUID = crypto.randomUUID();
const startDelimiter = `::START_CMD_${commandUUID}::`;
const endDelimiter = `::END_CMD_${commandUUID}::`;
const exitCodeDelimiter = `::EXIT_CODE_${commandUUID}::`;
const pidDelimiter = `::PID_${commandUUID}::`; // For background PID
// --- Initialize Temp Files for Background Task ---
if (isBackgroundTask) {
try {
const tempDir = os.tmpdir();
tempStdoutPath = path.join(tempDir, `term_out_${commandUUID}.log`);
tempStderrPath = path.join(tempDir, `term_err_${commandUUID}.log`);
} catch (err: any) {
// If temp dir setup fails, reject immediately
return reject(new Error(`Failed to determine temporary directory: ${err.message}`));
}
}
// --- End Temp File Init ---
let stdoutBuffer = ''; // For launch output
let stderrBuffer = ''; // For launch output
let commandOutputStarted = false;
let exitCode: number | null = null;
let backgroundPid: number | null = null; // Store PID
let receivedEndDelimiter = false;
// Timeout only applies to foreground execution or background *launch* phase
const effectiveTimeout = isBackgroundTask
? BACKGROUND_LAUNCH_TIMEOUT_MS
: Math.min(
params.timeout ?? DEFAULT_TIMEOUT_MS, // Use default timeout if not provided
MAX_TIMEOUT_OVERRIDE_MS
);
let onStdoutData: ((data: Buffer) => void) | null = null;
let onStderrData: ((data: Buffer) => void) | null = null;
let launchTimeoutId: NodeJS.Timeout | null = null; // Renamed for clarity
launchTimeoutId = setTimeout(() => {
const timeoutMessage = isBackgroundTask
? `Background command launch timed out after ${effectiveTimeout}ms.`
: `Command timed out after ${effectiveTimeout}ms.`;
if (!isBackgroundTask && this.bashProcess && !this.bashProcess.killed) {
try {
this.bashProcess.stdin.write('\x03'); // Ctrl+C for foreground timeout
} catch (e: any) { console.error("Error writing SIGINT on timeout:", e); }
}
// Store listeners before calling cleanup, as cleanup nullifies them
const listenersToClean = { onStdoutData, onStderrData };
cleanupListeners(listenersToClean); // Clean up listeners for this command
// Clean up temp files if background launch timed out
if (isBackgroundTask && tempStdoutPath && tempStderrPath) {
this.cleanupTempFiles(tempStdoutPath, tempStderrPath).catch(err => {
console.warn(`Error cleaning up temp files on timeout: ${err.message}`);
});
}
// Resolve the main promise with timeout info
originalResolve({
llmContent: `Command execution failed: ${timeoutMessage}\nCommand: ${params.command}\nExecuted in: ${this.currentCwd}\n${isBackgroundTask ? 'Mode: Background Launch' : `Mode: Foreground\nTimeout Limit: ${effectiveTimeout}ms`}\nPartial Stdout (Launch):\n${this.truncateOutput(stdoutBuffer)}\nPartial Stderr (Launch):\n${this.truncateOutput(stderrBuffer)}\nNote: ${isBackgroundTask ? 'Launch failed or took too long.' : 'Attempted interrupt (SIGINT). Shell state might be unpredictable if command ignored interrupt.'}`,
returnDisplay: `Timeout: ${timeoutMessage}`
});
}, effectiveTimeout);
// --- Data processing logic (refined slightly) ---
const processDataChunk = (chunk: string, isStderr: boolean): boolean => {
let dataToProcess = chunk;
if (!commandOutputStarted) {
const startIndex = dataToProcess.indexOf(startDelimiter);
if (startIndex !== -1) {
commandOutputStarted = true;
dataToProcess = dataToProcess.substring(startIndex + startDelimiter.length);
} else {
return false; // Still waiting for start delimiter
}
}
// Process PID delimiter (mostly expected on stderr for background)
const pidIndex = dataToProcess.indexOf(pidDelimiter);
if (pidIndex !== -1) {
// Extract PID value strictly between delimiter and newline/end
const pidMatch = dataToProcess.substring(pidIndex + pidDelimiter.length).match(/^(\d+)/);
if (pidMatch?.[1]) {
backgroundPid = parseInt(pidMatch[1], 10);
const pidEndIndex = pidIndex + pidDelimiter.length + pidMatch[1].length;
const beforePid = dataToProcess.substring(0, pidIndex);
if (isStderr) stderrBuffer += beforePid; else stdoutBuffer += beforePid;
dataToProcess = dataToProcess.substring(pidEndIndex);
} else {
// Consume delimiter even if no number followed
const beforePid = dataToProcess.substring(0, pidIndex);
if (isStderr) stderrBuffer += beforePid; else stdoutBuffer += beforePid;
dataToProcess = dataToProcess.substring(pidIndex + pidDelimiter.length);
}
}
// Process Exit Code delimiter
const exitCodeIndex = dataToProcess.indexOf(exitCodeDelimiter);
if (exitCodeIndex !== -1) {
const exitCodeMatch = dataToProcess.substring(exitCodeIndex + exitCodeDelimiter.length).match(/^(\d+)/);
if (exitCodeMatch?.[1]) {
exitCode = parseInt(exitCodeMatch[1], 10);
const beforeExitCode = dataToProcess.substring(0, exitCodeIndex);
if (isStderr) stderrBuffer += beforeExitCode; else stdoutBuffer += beforeExitCode;
dataToProcess = dataToProcess.substring(exitCodeIndex + exitCodeDelimiter.length + exitCodeMatch[1].length);
} else {
const beforeExitCode = dataToProcess.substring(0, exitCodeIndex);
if (isStderr) stderrBuffer += beforeExitCode; else stdoutBuffer += beforeExitCode;
dataToProcess = dataToProcess.substring(exitCodeIndex + exitCodeDelimiter.length);
}
}
// Process End delimiter
const endDelimiterIndex = dataToProcess.indexOf(endDelimiter);
if (endDelimiterIndex !== -1) {
receivedEndDelimiter = true;
const beforeEndDelimiter = dataToProcess.substring(0, endDelimiterIndex);
if (isStderr) stderrBuffer += beforeEndDelimiter; else stdoutBuffer += beforeEndDelimiter;
// Consume delimiter and potentially the exit code echoed after it
const afterEndDelimiter = dataToProcess.substring(endDelimiterIndex + endDelimiter.length);
const exitCodeEchoMatch = afterEndDelimiter.match(/^(\d+)/);
dataToProcess = exitCodeEchoMatch ? afterEndDelimiter.substring(exitCodeEchoMatch[1].length) : afterEndDelimiter;
}
// Append remaining data
if (dataToProcess.length > 0) {
if (isStderr) stderrBuffer += dataToProcess; else stdoutBuffer += dataToProcess;
}
// Check completion criteria
if (receivedEndDelimiter && exitCode !== null) {
setImmediate(cleanupAndResolve); // Use setImmediate
return true; // Signal completion of this command's stream processing
}
return false; // More data or delimiters expected
};
// Assign listeners
onStdoutData = (data: Buffer) => processDataChunk(data.toString(), false);
onStderrData = (data: Buffer) => processDataChunk(data.toString(), true);
// --- Cleanup Logic ---
// Pass listeners to allow cleanup even if they are nullified later
const cleanupListeners = (listeners?: { onStdoutData: any, onStderrData: any }) => {
if (launchTimeoutId) clearTimeout(launchTimeoutId);
launchTimeoutId = null;
// Use passed-in listeners if available, otherwise use current scope's
const stdoutListener = listeners?.onStdoutData ?? onStdoutData;
const stderrListener = listeners?.onStderrData ?? onStderrData;
if (this.bashProcess && !this.bashProcess.killed) {
if (stdoutListener) this.bashProcess.stdout.removeListener('data', stdoutListener);
if (stderrListener) this.bashProcess.stderr.removeListener('data', stderrListener);
}
// Only nullify the *current command's* cleanup reference if it matches
if (this.currentCommandCleanup === cleanupListeners) {
this.currentCommandCleanup = null;
}
// Nullify the listener references in the outer scope regardless
onStdoutData = null;
onStderrData = null;
};
// Store *this specific* cleanup function instance for the current command
this.currentCommandCleanup = cleanupListeners;
// --- Final Resolution / Polling Logic ---
const cleanupAndResolve = async () => {
// Prevent double execution if cleanup was already called (e.g., by timeout)
if (!this.currentCommandCleanup || this.currentCommandCleanup !== cleanupListeners) {
// Ensure temp files are cleaned if this command was superseded but might have created them
if (isBackgroundTask && tempStdoutPath && tempStderrPath) {
this.cleanupTempFiles(tempStdoutPath, tempStderrPath).catch(err => {
console.warn(`Error cleaning up temp files for superseded command: ${err.message}`);
});
}
return;
}
// Capture initial output *before* cleanup nullifies buffers indirectly
const launchStdout = this.truncateOutput(stdoutBuffer);
const launchStderr = this.truncateOutput(stderrBuffer);
// Store listeners before calling cleanup
const listenersToClean = { onStdoutData, onStderrData };
cleanupListeners(listenersToClean); // Remove listeners and clear launch timeout NOW
// --- Error check for missing exit code ---
if (exitCode === null) {
console.error(`CRITICAL: Command "${params.command}" (background: ${isBackgroundTask}) finished delimiter processing but exitCode is null.`);
const errorMode = isBackgroundTask ? 'Background Launch' : 'Foreground';
if (isBackgroundTask && tempStdoutPath && tempStderrPath) {
await this.cleanupTempFiles(tempStdoutPath, tempStderrPath);
}
originalResolve({ // Use originalResolve as this is a failure *before* polling starts
llmContent: `Command: ${params.command}\nExecuted in: ${this.currentCwd}\nMode: ${errorMode}\nExit Code: -2 (Internal Error: Exit code not captured)\nStdout (during setup):\n${launchStdout}\nStderr (during setup):\n${launchStderr}`,
returnDisplay: `Internal Error: Failed to capture command exit code.\n${launchStdout}\nStderr: ${launchStderr}`.trim()
});
return;
}
// --- CWD Update Logic (Only for Foreground Success or 'cd') ---
let cwdUpdateError = '';
if (!isBackgroundTask) { // Only run for foreground
const mightChangeCwd = params.command.trim().startsWith('cd ');
if (exitCode === 0 || mightChangeCwd) {
try {
const latestCwd = await this.getCurrentShellCwd();
if (this.currentCwd !== latestCwd) {
this.currentCwd = latestCwd;
}
} catch (e: any) {
if (exitCode === 0) { // Only warn if the command itself succeeded
cwdUpdateError = `\nWarning: Failed to verify/update current working directory after command: ${e.message}`;
console.error("Failed to update CWD after successful command:", e);
}
}
}
}
// --- End CWD Update ---
// --- Result Formatting & Polling Decision ---
if (isBackgroundTask) {
const launchSuccess = exitCode === 0;
const pidString = backgroundPid !== null ? backgroundPid.toString() : 'Not Captured';
// Check if polling should start
if (launchSuccess && backgroundPid !== null && tempStdoutPath && tempStderrPath) {
// --- START POLLING ---
// Don't await this, let it run in the background and resolve the original promise later
this.inspectBackgroundProcess(
backgroundPid,
params.command,
this.currentCwd, // CWD at time of launch
launchStdout, // Initial output captured during launch
launchStderr, // Initial output captured during launch
tempStdoutPath, // Path for final stdout
tempStderrPath, // Path for final stderr
originalResolve // The resolve function of the main promise
);
// IMPORTANT: Do NOT resolve the promise here. pollBackgroundProcess will do it.
// --- END POLLING ---
} else {
// Background launch failed OR PID was not captured OR temp files missing
const reason = backgroundPid === null ? "PID not captured" : `Launch failed (Exit Code: ${exitCode})`;
const displayMessage = `Failed to launch process in background (${reason})`;
console.error(`Background launch failed for command: ${params.command}. Reason: ${reason}`); // ERROR LOG
// Ensure cleanup of temp files if launch failed
if (tempStdoutPath && tempStderrPath) {
await this.cleanupTempFiles(tempStdoutPath, tempStderrPath);
}
originalResolve({ // Use originalResolve as polling won't start
llmContent: `Background Command Launch Failed: ${params.command}\nExecuted in: ${this.currentCwd}\nReason: ${reason}\nPID: ${pidString}\nExit Code (Launch): ${exitCode}\nStdout (During Launch):\n${launchStdout}\nStderr (During Launch):\n${launchStderr}`,
returnDisplay: displayMessage
});
}
} else {
// --- Foreground task result (resolve immediately) ---
let displayOutput = '';
const stdoutTrimmed = launchStdout.trim();
const stderrTrimmed = launchStderr.trim();
if (stderrTrimmed) {
displayOutput = stderrTrimmed;
} else if (stdoutTrimmed) {
displayOutput = stdoutTrimmed;
}
if (exitCode !== 0 && !displayOutput) {
displayOutput = `Failed with exit code: ${exitCode}`;
} else if (exitCode === 0 && !displayOutput) {
displayOutput = `Success (no output)`;
}
originalResolve({ // Use originalResolve for foreground result
llmContent: `Command: ${params.command}\nExecuted in: ${this.currentCwd}\nExit Code: ${exitCode}\nStdout:\n${launchStdout}\nStderr:\n${launchStderr}${cwdUpdateError}`,
returnDisplay: displayOutput.trim() || `Exit Code: ${exitCode}` // Ensure some display
});
// --- End Foreground Result ---
}
}; // End of cleanupAndResolve
// --- Attach listeners ---
if (!this.bashProcess || this.bashProcess.killed) {
console.error("Bash process lost or killed before listeners could be attached.");
// Ensure temp files are cleaned up if they exist
if (isBackgroundTask && tempStdoutPath && tempStderrPath) {
this.cleanupTempFiles(tempStdoutPath, tempStderrPath).catch(err => {
console.warn(`Error cleaning up temp files on attach failure: ${err.message}`);
});
}
return originalReject(new Error("Bash process lost or killed before listeners could be attached."));
}
// Defensive remove shouldn't be strictly necessary with current cleanup logic, but harmless
// if (onStdoutData) this.bashProcess.stdout.removeListener('data', onStdoutData);
// if (onStderrData) this.bashProcess.stderr.removeListener('data', onStderrData);
// Attach the fresh listeners
if (onStdoutData) this.bashProcess.stdout.on('data', onStdoutData);
if (onStderrData) this.bashProcess.stderr.on('data', onStderrData);
// --- Construct and Write Command ---
let commandToWrite: string;
if (isBackgroundTask && tempStdoutPath && tempStderrPath) {
// Background: Redirect command's stdout/stderr to temp files.
// Use subshell { ... } > file 2> file to redirect the command inside.
// Capture PID of the subshell. Capture exit code of the subshell launch.
// Ensure the subshell itself doesn't interfere with delimiter capture on stderr.
commandToWrite = `echo "${startDelimiter}"; { { ${params.command} > "${tempStdoutPath}" 2> "${tempStderrPath}"; } & } 2>/dev/null; __LAST_PID=$!; echo "${pidDelimiter}$__LAST_PID" >&2; echo "${exitCodeDelimiter}$?" >&2; echo "${endDelimiter}$?" >&1\n`;
} else if (!isBackgroundTask) {
// Foreground: Original structure. Capture command exit code.
commandToWrite = `echo "${startDelimiter}"; ${params.command}; __EXIT_CODE=$?; echo "${exitCodeDelimiter}$__EXIT_CODE" >&2; echo "${endDelimiter}$__EXIT_CODE" >&1\n`;
} else {
// Should not happen if background task setup failed, but handle defensively
return originalReject(new Error("Internal setup error: Missing temporary file paths for background execution."));
}
try {
if (this.bashProcess?.stdin?.writable) {
this.bashProcess.stdin.write(commandToWrite, (err) => {
if (err) {
console.error(`Error writing command "${params.command}" to bash stdin (callback):`, err);
// Store listeners before calling cleanup
const listenersToClean = { onStdoutData, onStderrData };
cleanupListeners(listenersToClean); // Attempt cleanup
if (isBackgroundTask && tempStdoutPath && tempStderrPath) {
this.cleanupTempFiles(tempStdoutPath, tempStderrPath).catch(e => console.warn(`Cleanup failed: ${e.message}`));
}
originalReject(new Error(`Shell stdin write error: ${err.message}. Command likely did not execute.`));
}
});
} else {
throw new Error("Shell stdin is not writable or process closed when attempting to write command.");
}
} catch (e: any) {
console.error(`Error writing command "${params.command}" to bash stdin (sync):`, e);
// Store listeners before calling cleanup
const listenersToClean = { onStdoutData, onStderrData };
cleanupListeners(listenersToClean); // Attempt cleanup
if (isBackgroundTask && tempStdoutPath && tempStderrPath) {
this.cleanupTempFiles(tempStdoutPath, tempStderrPath).catch(err => console.warn(`Cleanup failed: ${err.message}`));
}
originalReject(new Error(`Shell stdin write exception: ${e.message}. Command likely did not execute.`));
}
}); // End of main promise constructor
return promise; // Return the promise created at the top
} // End of executeCommandInShell
// --- **** NEW: Background Process Polling **** ---
private async inspectBackgroundProcess(
pid: number,
command: string,
cwd: string,
initialStdout: string, // Stdout during launch phase
initialStderr: string, // Stderr during launch phase
tempStdoutPath: string, // Path to redirected stdout
tempStderrPath: string, // Path to redirected stderr
resolve: (value: TerminalToolResult | PromiseLike<TerminalToolResult>) => void // The original promise's resolve
): Promise<void> { // This function manages its own lifecycle but resolves the outer promise
let finalStdout = '';
let finalStderr = '';
let llmAnalysis = '';
let fileReadError = '';
// --- Call LLM Analysis ---
try {
const { status, summary } = await this.backgroundTerminalAnalyzer.analyze(pid, tempStdoutPath, tempStderrPath, command);
if (status === 'Unknown')
llmAnalysis = `LLM analysis failed: ${summary}`;
else
llmAnalysis = summary;
} catch (llmError: any) {
console.error(`LLM analysis failed for PID ${pid} command "${command}":`, llmError);
llmAnalysis = `LLM analysis failed: ${llmError.message}`; // Include error in analysis placeholder
}
// --- End LLM Call ---
try {
finalStdout = await fs.readFile(tempStdoutPath, 'utf-8');
finalStderr = await fs.readFile(tempStderrPath, 'utf-8');
} catch (err: any) {
console.error(`Error reading temp output files for PID ${pid}:`, err);
fileReadError = `\nWarning: Failed to read temporary output files (${err.message}). Final output may be incomplete.`;
}
// --- Clean up temp files ---
await this.cleanupTempFiles(tempStdoutPath, tempStderrPath);
// --- End Cleanup ---
const truncatedFinalStdout = this.truncateOutput(finalStdout);
const truncatedFinalStderr = this.truncateOutput(finalStderr);
// Resolve the original promise passed into pollBackgroundProcess
resolve({
llmContent: `Background Command: ${command}\nLaunched in: ${cwd}\nPID: ${pid}\n--- LLM Analysis ---\n${llmAnalysis}\n--- Final Stdout (from ${path.basename(tempStdoutPath)}) ---\n${truncatedFinalStdout}\n--- Final Stderr (from ${path.basename(tempStderrPath)}) ---\n${truncatedFinalStderr}\n--- Launch Stdout ---\n${initialStdout}\n--- Launch Stderr ---\n${initialStderr}${fileReadError}`,
returnDisplay: `(PID: ${pid}): ${this.truncateOutput(llmAnalysis, 200)}`
});
} // End of pollBackgroundProcess
/**
 * Best-effort removal of both temporary redirect files.
 * Missing files (never created or already removed) are silently ignored;
 * any other unlink failure is logged as a warning, never thrown.
 */
private async cleanupTempFiles(stdoutPath: string | null, stderrPath: string | null): Promise<void> {
  const removeIgnoringMissing = async (filePath: string | null): Promise<void> => {
    if (!filePath) return;
    try {
      await fs.unlink(filePath);
    } catch (err: any) {
      // ENOENT is expected when the file was never written; warn on the rest.
      if (err.code !== 'ENOENT') {
        console.warn(`Failed to delete temporary file '${filePath}': ${err.message}`);
      }
    }
  };
  // Delete both files concurrently.
  await Promise.all([
    removeIgnoringMissing(stdoutPath),
    removeIgnoringMissing(stderrPath)
  ]);
}
// --- Get CWD ---
/**
 * Queries the persistent bash process for its current working directory.
 *
 * Writes `printf "%s" "$PWD"` plus a per-call unique delimiter to the shell's
 * stdin, then accumulates stdout until the delimiter appears. Any stderr
 * output during the check, a 5s timeout, or a stdin write failure rejects.
 * A `finished` flag plus listener nulling guarantees the promise settles
 * exactly once and late callbacks become no-ops.
 *
 * @returns The shell's `$PWD`, trimmed of surrounding whitespace.
 */
private getCurrentShellCwd(): Promise<string> {
  return new Promise((resolve, reject) => {
    if (!this.bashProcess || !this.bashProcess.stdin?.writable || this.bashProcess.killed) {
      return reject(new Error("Shell not running, stdin not writable, or killed for PWD check"));
    }
    // Unique delimiter so unrelated shell output cannot be mistaken for ours.
    const pwdUuid = crypto.randomUUID();
    const pwdDelimiter = `::PWD_${pwdUuid}::`;
    let pwdOutput = '';
    let onPwdData: ((data: Buffer) => void) | null = null;
    let onPwdError: ((data: Buffer) => void) | null = null; // To catch errors during pwd
    let pwdTimeoutId: NodeJS.Timeout | null = null;
    let finished = false; // Prevent double resolution/rejection
    // Single exit path: clears the timeout, detaches both listeners, then
    // settles the promise (reject when `err` is given, resolve otherwise).
    const cleanupPwdListeners = (err?: Error) => {
      if (finished) return; // Already handled
      finished = true;
      if (pwdTimeoutId) clearTimeout(pwdTimeoutId);
      pwdTimeoutId = null;
      const stdoutListener = onPwdData; // Capture current reference
      const stderrListener = onPwdError; // Capture current reference
      onPwdData = null; // Nullify before removing so in-flight callbacks no-op
      onPwdError = null;
      if (this.bashProcess && !this.bashProcess.killed) {
        if (stdoutListener) this.bashProcess.stdout.removeListener('data', stdoutListener);
        if (stderrListener) this.bashProcess.stderr.removeListener('data', stderrListener);
      }
      if (err) {
        reject(err);
      } else {
        // Trim whitespace and trailing newlines robustly
        resolve(pwdOutput.trim());
      }
    }
    // Accumulate stdout until the delimiter shows up, then resolve.
    onPwdData = (data: Buffer) => {
      if (!onPwdData) return; // Listener removed
      const dataStr = data.toString();
      const delimiterIndex = dataStr.indexOf(pwdDelimiter);
      if (delimiterIndex !== -1) {
        pwdOutput += dataStr.substring(0, delimiterIndex);
        cleanupPwdListeners(); // Resolve successfully
      } else {
        pwdOutput += dataStr;
      }
    };
    // Any stderr during the check is treated as failure of the whole check.
    onPwdError = (data: Buffer) => {
      if (!onPwdError) return; // Listener removed
      const dataStr = data.toString();
      console.error(`Error during PWD check: ${dataStr}`);
      cleanupPwdListeners(new Error(`Stderr received during pwd check: ${this.truncateOutput(dataStr, 100)}`));
    };
    // Attach listeners
    this.bashProcess.stdout.on('data', onPwdData);
    this.bashProcess.stderr.on('data', onPwdError);
    // Set timeout
    pwdTimeoutId = setTimeout(() => {
      cleanupPwdListeners(new Error("Timeout waiting for pwd response"));
    }, 5000); // 5 second timeout for pwd
    // Write command
    try {
      // Use printf for robustness against special characters in PWD and ensure newline
      const pwdCommand = `printf "%s" "$PWD"; printf "${pwdDelimiter}";\n`;
      if (this.bashProcess?.stdin?.writable) {
        this.bashProcess.stdin.write(pwdCommand, (err) => {
          if (err) {
            // Error during write callback, likely means shell is unresponsive
            console.error("Error writing pwd command (callback):", err);
            cleanupPwdListeners(new Error(`Failed to write pwd command: ${err.message}`));
          }
        });
      } else {
        throw new Error("Shell stdin not writable for pwd command.");
      }
    } catch (e: any) {
      console.error("Exception writing pwd command:", e);
      cleanupPwdListeners(new Error(`Exception writing pwd command: ${e.message}`));
    }
  });
}
// --- Truncate Output ---
/**
 * Caps `output` at `limit` characters (defaults to this.outputLimit),
 * appending a truncation notice when anything was cut off.
 */
private truncateOutput(output: string, limit?: number): string {
  const effectiveLimit = limit ?? this.outputLimit;
  if (output.length <= effectiveLimit) {
    return output;
  }
  return `${output.substring(0, effectiveLimit)}\n... [Output truncated at ${effectiveLimit} characters]`;
}
// --- Clear Queue ---
/**
 * Cancels every queued (not yet started) command, resolving each queued
 * promise with a cancellation result that carries `error.message`.
 */
private clearQueue(error: Error) {
  const pending = this.commandQueue;
  this.commandQueue = [];
  for (const { resolve, params } of pending) {
    resolve({
      llmContent: `Command cancelled: ${params.command}\nReason: ${error.message}`,
      returnDisplay: `Command Cancelled: ${error.message}`
    });
  }
}
// --- Destroy ---
/**
 * Tears the tool down: rejects/clears all pending work, detaches every
 * listener from the bash process, closes its stdin, and kills it
 * (SIGTERM first, SIGKILL after a 500ms grace period).
 * Safe to call when no shell process exists.
 */
destroy() {
  // Reject any pending shell readiness promise
  this.rejectShellReady?.(new Error("BashTool destroyed during initialization or operation."));
  this.rejectShellReady = undefined; // Prevent further calls
  this.resolveShellReady = undefined;
  this.clearQueue(new Error("BashTool is being destroyed."));
  // Attempt to cleanup listeners for the *currently executing* command, if any
  try {
    this.currentCommandCleanup?.();
  } catch (e) {
    console.warn("Error during current command cleanup:", e)
  }
  // Handle the bash process itself
  if (this.bashProcess) {
    const proc = this.bashProcess; // Reference before nullifying
    const pid = proc.pid;
    this.bashProcess = null; // Nullify reference immediately so no new work touches it
    proc.stdout?.removeAllListeners();
    proc.stderr?.removeAllListeners();
    proc.removeAllListeners('error');
    proc.removeAllListeners('close');
    // Ensure stdin is closed
    proc.stdin?.end();
    try {
      // Don't wait for these, just attempt
      proc.kill('SIGTERM'); // Attempt graceful first
      setTimeout(() => {
        if (!proc.killed) {
          proc.kill('SIGKILL'); // Force kill if needed
        }
      }, 500); // 500ms grace period
    } catch (e: any) {
      // Catch errors if process already exited etc.
      console.warn(`Error trying to kill bash process PID: ${pid}: ${e.message}`);
    }
  } else {
    // No shell process exists — nothing further to tear down.
  }
  // Note: We cannot reliably clean up temp files for background tasks
  // that were polling when destroy() was called without more complex state tracking.
  // OS should eventually clean /tmp, or implement a startup cleanup routine if needed.
}
} // End of TerminalTool class

View File

@ -0,0 +1,58 @@
import { ToolListUnion, FunctionDeclaration } from '@google/genai';
import { Tool } from './Tool.js';
import { ToolResult } from './ToolResult.js';
/**
 * Registry mapping tool names to their definitions. Exposes the registered
 * schemas in the shape the Gemini SDK expects and supports lookup by name.
 */
class ToolRegistry {
  private tools: Map<string, Tool> = new Map();

  /**
   * Registers a tool definition, overwriting (with a warning) any tool
   * previously registered under the same name.
   * @param tool - The tool object containing schema and execution logic.
   */
  registerTool(tool: Tool): void {
    if (this.tools.has(tool.name)) {
      console.warn(`Tool with name "${tool.name}" is already registered. Overwriting.`);
    }
    this.tools.set(tool.name, tool);
  }

  /**
   * Collects every registered tool's function declaration in the format
   * required by Gemini.
   * @returns A ToolListUnion with the declarations, or an empty array when
   *   no tools are registered.
   */
  getToolSchemas(): ToolListUnion {
    const declarations: FunctionDeclaration[] = Array.from(this.tools.values(), tool => tool.schema);
    // An empty list signals "no tools"; some @google/genai versions may
    // instead want `[{ functionDeclarations: [] }]` — check the SDK docs.
    if (declarations.length === 0) {
      return [];
    }
    return [{ functionDeclarations: declarations }];
  }

  /** Names of all currently registered tools. */
  listAvailableTools(): string[] {
    return [...this.tools.keys()];
  }

  /** Looks up a single tool definition by name. */
  getTool(name: string): Tool | undefined {
    return this.tools.get(name);
  }
}

// Export a singleton instance of the registry
export const toolRegistry = new ToolRegistry();

View File

@ -0,0 +1,201 @@
import fs from 'fs';
import path from 'path';
import { ToolResult } from './ToolResult.js';
import { BaseTool } from './BaseTool.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import { ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolEditConfirmationDetails } from '../ui/types.js';
import * as Diff from 'diff';
/**
 * Parameters for the WriteFile tool
 */
export interface WriteFileToolParams {
  /**
   * The absolute path to the file to write to
   */
  file_path: string;

  /**
   * The content to write to the file
   */
  content: string;
}

/**
 * Standardized result from the WriteFile tool.
 * Adds no fields beyond the base ToolResult today; exists so the tool's
 * result type can grow without changing its class signature.
 */
export interface WriteFileToolResult extends ToolResult {
}
/**
 * Implementation of the WriteFile tool that writes files to the filesystem.
 * All writes are confined to a configured root directory and, unless the user
 * has opted into "always write", are confirmed via a diff preview first.
 */
export class WriteFileTool extends BaseTool<WriteFileToolParams, WriteFileToolResult> {
  public static readonly Name: string = 'write_file';

  // Set once the user picks "proceed always"; suppresses every later
  // write confirmation for this tool instance.
  private shouldAlwaysWrite = false;

  /**
   * The root directory that this tool is grounded in.
   * All file operations will be restricted to this directory.
   */
  private rootDirectory: string;

  /**
   * Creates a new instance of the WriteFileTool
   * @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
   */
  constructor(rootDirectory: string) {
    super(
      WriteFileTool.Name,
      'WriteFile',
      'Writes content to a specified file in the local filesystem.',
      {
        properties: {
          file_path: {
            description: 'The absolute path to the file to write to (e.g., \'/home/user/project/file.txt\'). Relative paths are not supported.',
            type: 'string'
          },
          content: {
            description: 'The content to write to the file.',
            type: 'string'
          }
        },
        required: ['file_path', 'content'],
        type: 'object'
      }
    );
    // Resolve once so later containment checks compare absolute paths.
    this.rootDirectory = path.resolve(rootDirectory);
  }

  /**
   * Checks if a path is within the root directory
   * @param pathToCheck The path to check
   * @returns True if the path is within the root directory, false otherwise
   */
  private isWithinRoot(pathToCheck: string): boolean {
    const normalizedPath = path.normalize(pathToCheck);
    const normalizedRoot = path.normalize(this.rootDirectory);
    // Ensure the normalizedRoot ends with a path separator for proper path
    // comparison (prevents '/root-other' matching a root of '/root').
    const rootWithSep = normalizedRoot.endsWith(path.sep)
      ? normalizedRoot
      : normalizedRoot + path.sep;
    return normalizedPath === normalizedRoot || normalizedPath.startsWith(rootWithSep);
  }

  /**
   * Validates the parameters for the WriteFile tool
   * @param params Parameters to validate
   * @returns An error message string when invalid, or null when valid
   */
  invalidParams(params: WriteFileToolParams): string | null {
    if (this.schema.parameters && !SchemaValidator.validate(this.schema.parameters as Record<string, unknown>, params)) {
      return 'Parameters failed schema validation.';
    }
    // Ensure path is absolute
    if (!path.isAbsolute(params.file_path)) {
      return `File path must be absolute: ${params.file_path}`;
    }
    // Ensure path is within the root directory
    if (!this.isWithinRoot(params.file_path)) {
      return `File path must be within the root directory (${this.rootDirectory}): ${params.file_path}`;
    }
    return null;
  }

  /**
   * Determines if the tool should prompt for confirmation before execution.
   * Builds a unified diff of current-vs-proposed content for the prompt.
   * @param params Parameters for the tool execution
   * @returns Confirmation details for the UI, or false when no prompt is
   *   needed (user previously chose "proceed always").
   */
  async shouldConfirmExecute(params: WriteFileToolParams): Promise<ToolCallConfirmationDetails | false> {
    if (this.shouldAlwaysWrite) {
      return false;
    }
    const relativePath = makeRelative(params.file_path, this.rootDirectory);
    const fileName = path.basename(params.file_path);
    let currentContent = '';
    try {
      currentContent = fs.readFileSync(params.file_path, 'utf8');
    } catch (error) {
      // File may not exist, which is fine — the diff then shows a new file.
    }
    const fileDiff = Diff.createPatch(
      fileName,
      currentContent,
      params.content,
      'Current',
      'Proposed',
      { context: 3, ignoreWhitespace: true}
    );
    const confirmationDetails: ToolEditConfirmationDetails = {
      title: `Confirm Write: ${shortenPath(relativePath)}`,
      fileName,
      fileDiff,
      // Remember a "proceed always" choice so future writes skip the prompt.
      onConfirm: async (outcome: ToolConfirmationOutcome) => {
        if (outcome === ToolConfirmationOutcome.ProceedAlways) {
          this.shouldAlwaysWrite = true;
        }
      },
    };
    return confirmationDetails;
  }

  /**
   * Gets a description of the file writing operation
   * @param params Parameters for the file writing
   * @returns A string describing the file being written to
   */
  getDescription(params: WriteFileToolParams): string {
    const relativePath = makeRelative(params.file_path, this.rootDirectory);
    return `Writing to ${shortenPath(relativePath)}`;
  }

  /**
   * Executes the file writing operation. Creates missing parent directories,
   * then writes (or overwrites) the target file with UTF-8 content.
   * @param params Parameters for the file writing
   * @returns Result of the file writing operation; errors are reported in the
   *   result rather than thrown.
   */
  async execute(params: WriteFileToolParams): Promise<WriteFileToolResult> {
    const validationError = this.invalidParams(params);
    if (validationError) {
      return {
        llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
        returnDisplay: '**Error:** Failed to execute tool.'
      };
    }
    try {
      // Ensure parent directories exist
      const dirName = path.dirname(params.file_path);
      if (!fs.existsSync(dirName)) {
        fs.mkdirSync(dirName, { recursive: true });
      }
      // Write the file
      fs.writeFileSync(params.file_path, params.content, 'utf8');
      return {
        llmContent: `Successfully wrote to file: ${params.file_path}`,
        returnDisplay: `Wrote to ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`
      };
    } catch (error) {
      const errorMsg = `Error writing to file: ${error instanceof Error ? error.message : String(error)}`;
      return {
        llmContent: `Error writing to file ${params.file_path}: ${errorMsg}`,
        returnDisplay: `Failed to write to file: ${errorMsg}`
      };
    }
  }
}

View File

@ -0,0 +1,90 @@
import React, { useState, useEffect } from 'react';
import { Box, Text } from 'ink';
import type { HistoryItem } from './types.js';
import { useGeminiStream } from './hooks/useGeminiStream.js';
import { useLoadingIndicator } from './hooks/useLoadingIndicator.js';
import Header from './components/Header.js';
import Tips from './components/Tips.js';
import HistoryDisplay from './components/HistoryDisplay.js';
import LoadingIndicator from './components/LoadingIndicator.js';
import InputPrompt from './components/InputPrompt.js';
import Footer from './components/Footer.js';
import { StreamingState } from '../core/StreamingState.js';
import { PartListUnion } from '@google/genai';
interface AppProps {
  // Working directory displayed in the header.
  directory: string;
}

/**
 * Top-level Ink component: wires the Gemini stream hook to the history list,
 * the input prompt, the loading indicator, and the init-error banner.
 */
const App = ({ directory }: AppProps) => {
  const [query, setQuery] = useState('');
  const [history, setHistory] = useState<HistoryItem[]>([]);
  // Stream state + submit function; initError is set when the client could
  // not be initialized (e.g. missing API key).
  const { streamingState, submitQuery, initError } = useGeminiStream(setHistory);
  const { elapsedTime, currentLoadingPhrase } = useLoadingIndicator(streamingState);

  // Clears the input once a submission settles, success or failure alike.
  const handleInputSubmit = (value: PartListUnion) => {
    submitQuery(value).then(() => {
      setQuery('');
    }).catch(() => {
      setQuery('');
    });
  };

  // Surface an initialization error into the history exactly once
  // (the .some() guard prevents re-adding it on every render).
  useEffect(() => {
    if (initError && !history.some(item => item.type === 'error' && item.text?.includes(initError))) {
      setHistory(prev => [
        ...prev,
        { id: Date.now(), type: 'error', text: `Initialization Error: ${initError}. Please check API key and configuration.` } as HistoryItem
      ]);
    }
  }, [initError, history]);

  // True while any tool call in the history awaits user confirmation.
  const isWaitingForToolConfirmation = history.some(item =>
    item.type === 'tool_group' && item.tools.some(tool => tool.confirmationDetails !== undefined)
  );
  // Input is only interactive when idle and initialization succeeded.
  const isInputActive = streamingState === StreamingState.Idle && !initError;
  return (
    <Box flexDirection="column" padding={1} marginBottom={1} width="100%">
      <Header cwd={directory} />
      <Tips />
      {/* Red banner for init errors; prefers the history copy of the message when present. */}
      {initError && streamingState !== StreamingState.Responding && !isWaitingForToolConfirmation && (
        <Box borderStyle="round" borderColor="red" paddingX={1} marginBottom={1}>
          {history.find(item => item.type === 'error' && item.text?.includes(initError))?.text ? (
            <Text color="red">{history.find(item => item.type === 'error' && item.text?.includes(initError))?.text}</Text>
          ) : (
            <>
              <Text color="red">Initialization Error: {initError}</Text>
              <Text color="red"> Please check API key and configuration.</Text>
            </>
          )}
        </Box>
      )}
      <Box flexDirection="column">
        <HistoryDisplay history={history} onSubmit={handleInputSubmit} />
        <LoadingIndicator
          isLoading={streamingState === StreamingState.Responding}
          currentLoadingPhrase={currentLoadingPhrase}
          elapsedTime={elapsedTime}
        />
      </Box>
      {!isWaitingForToolConfirmation && isInputActive && (
        <InputPrompt
          query={query}
          setQuery={setQuery}
          onSubmit={handleInputSubmit}
          isActive={isInputActive}
        />
      )}
      <Footer queryLength={query.length} />
    </Box>
  );
};

export default App;

View File

@ -0,0 +1,21 @@
import React from 'react';
import { Box, Text } from 'ink';
interface FooterProps {
  /** Number of characters currently typed in the input box. */
  queryLength: number;
}

/**
 * Bottom status bar: a shortcuts hint while the input is empty on the left,
 * and the product name on the right.
 */
const Footer: React.FC<FooterProps> = ({ queryLength }) => {
  const hint = queryLength === 0 ? "? for shortcuts" : "";
  return (
    <Box marginTop={1} justifyContent="space-between">
      <Box minWidth={15}>
        <Text color="gray">{hint}</Text>
      </Box>
      <Text color="blue">Gemini</Text>
    </Box>
  );
};

export default Footer;

View File

@ -0,0 +1,38 @@
import React from 'react';
import { Box, Text } from 'ink';
import { UI_WIDTH, BOX_PADDING_X } from '../constants.js';
import { shortenPath } from '../../utils/paths.js';
interface HeaderProps {
  // Current working directory shown beneath the banner.
  cwd: string;
}

/**
 * Top banner: static ASCII-art "GEMINI" logo followed by a bordered box
 * displaying the (shortened) current working directory.
 */
const Header: React.FC<HeaderProps> = ({ cwd }) => {
  return (
    <>
      {/* Static Header Art — template literal; whitespace is significant. */}
      <Box marginBottom={1}>
        <Text color="blue">{`
 ______ ________ ____ ____ _____ ____ _____ _____
.' ___ ||_ __ ||_ \\ / _||_ _||_ \\|_ _||_ _|
/ .' \\_| | |_ \\_| | \\/ | | | | \\ | | | |
| | ____ | _| _ | |\\ /| | | | | |\\ \\| | | |
\\ \`.___] |_| |__/ | _| |_\\/_| |_ _| |_ _| |_\\ |_ _| |_
\`._____.'|________||_____||_____||_____||_____|\\____||_____|`}</Text>
      </Box>
      {/* CWD Display */}
      <Box
        borderStyle="round"
        borderColor="blue"
        paddingX={BOX_PADDING_X}
        flexDirection="column"
        marginBottom={1}
        width={UI_WIDTH}
      >
        {/* Long paths are shortened so the line fits within UI_WIDTH. */}
        <Box paddingLeft={2}><Text color="gray">cwd: {shortenPath(cwd, /*maxLength*/ 70)}</Text></Box>
      </Box>
    </>
  );
};

export default Header;

View File

@ -0,0 +1,39 @@
import React from 'react';
import { Box } from 'ink';
import type { HistoryItem } from '../types.js';
import { UI_WIDTH } from '../constants.js';
import UserMessage from './messages/UserMessage.js';
import GeminiMessage from './messages/GeminiMessage.js';
import InfoMessage from './messages/InfoMessage.js';
import ErrorMessage from './messages/ErrorMessage.js';
import ToolGroupMessage from './messages/ToolGroupMessage.js';
import { PartListUnion } from '@google/genai';
interface HistoryDisplayProps {
  history: HistoryItem[];
  onSubmit: (value: PartListUnion) => void;
}

/**
 * Renders the conversation history, delegating each entry to the message
 * component matching its type.
 */
const HistoryDisplay: React.FC<HistoryDisplayProps> = ({ history, onSubmit }) => {
  // Maps one history item to the component that knows how to render it.
  const renderItem = (item: HistoryItem): React.ReactNode => {
    switch (item.type) {
      case 'user':
        return <UserMessage text={item.text} />;
      case 'gemini':
        return <GeminiMessage text={item.text} />;
      case 'info':
        return <InfoMessage text={item.text} />;
      case 'error':
        return <ErrorMessage text={item.text} />;
      case 'tool_group':
        return <ToolGroupMessage toolCalls={item.tools} onSubmit={onSubmit} />;
      default:
        return null;
    }
  };
  return (
    <Box flexDirection="column">
      {history.map((item) => (
        <Box key={item.id} marginBottom={1}>
          {renderItem(item)}
        </Box>
      ))}
    </Box>
  );
};

export default HistoryDisplay;

View File

@ -0,0 +1,39 @@
import React from 'react';
import { Box, Text } from 'ink';
import TextInput from 'ink-text-input';
interface InputPromptProps {
  // Current input value (controlled by the parent).
  query: string;
  // Setter invoked on every keystroke.
  setQuery: (value: string) => void;
  // Called when the user presses Enter.
  onSubmit: (value: string) => void;
  // Whether the prompt should capture keystrokes.
  isActive: boolean;
}

/**
 * Bordered single-line text prompt for user queries.
 *
 * Fix: `isActive` was declared in the props and passed by callers but never
 * read — the TextInput was hard-coded to `focus={true}`, so an inactive
 * prompt would still capture keystrokes. It now drives the focus flag.
 * Callers that only render the prompt while active see no behavior change.
 */
const InputPrompt: React.FC<InputPromptProps> = ({
  query,
  setQuery,
  onSubmit,
  isActive,
}) => {
  return (
    <Box
      marginTop={1}
      borderStyle="round"
      borderColor={'white'}
      paddingX={1}
    >
      <Text color={'white'}>&gt; </Text>
      <Box flexGrow={1}>
        <TextInput
          value={query}
          onChange={setQuery}
          onSubmit={onSubmit}
          showCursor={true}
          focus={isActive}
          placeholder={'Ask Gemini... (try "/init" or "/help")'}
        />
      </Box>
    </Box>
  );
};

export default InputPrompt;

View File

@ -0,0 +1,32 @@
import React from 'react';
import { Box, Text } from 'ink';
import Spinner from 'ink-spinner';
interface LoadingIndicatorProps {
  isLoading: boolean;
  currentLoadingPhrase: string;
  elapsedTime: number;
}

/**
 * Spinner row shown while Gemini is responding: animated dots, the current
 * loading phrase with elapsed seconds, and a cancel hint on the right.
 * Renders nothing when not loading.
 */
const LoadingIndicator: React.FC<LoadingIndicatorProps> = (props) => {
  const { isLoading, currentLoadingPhrase, elapsedTime } = props;
  if (!isLoading) return null;
  return (
    <Box marginTop={1} paddingLeft={0}>
      <Box marginRight={1}>
        <Spinner type="dots" />
      </Box>
      <Text color="cyan">{currentLoadingPhrase} ({elapsedTime}s)</Text>
      <Box flexGrow={1}>{/* Spacer */}</Box>
      <Text color="gray">(ESC to cancel)</Text>
    </Box>
  );
};

export default LoadingIndicator;

View File

@ -0,0 +1,17 @@
import React from 'react';
import { Box, Text } from 'ink';
import { UI_WIDTH } from '../constants.js';
const Tips: React.FC = () => {
return (
<Box flexDirection="column" marginBottom={1} width={UI_WIDTH}>
<Text>Tips for getting started:</Text>
<Text>1. <Text bold>/help</Text> for more information.</Text>
<Text>2. <Text bold>/init</Text> to create a GEMINI.md for instructions & context.</Text>
<Text>3. Ask coding questions, edit code or run commands.</Text>
<Text>4. Be specific for the best results.</Text>
</Box>
);
};
export default Tips;

View File

@ -0,0 +1,152 @@
import React from 'react';
import { Box, Text } from 'ink'
/** One parsed line of a unified diff, with the line numbers it maps to. */
interface DiffLine {
  type: 'add' | 'del' | 'context' | 'hunk' | 'other';
  oldLine?: number; // line number in the old file (del/context)
  newLine?: number; // line number in the new file (add/context)
  content: string;  // line text with the leading diff marker stripped
}

/**
 * Parses git-style unified diff text into tagged lines with old/new line
 * numbers. Everything before the first hunk header is skipped; inside a hunk,
 * lines are classified by their first character (+, -, space, or backslash).
 */
function parseDiffWithLineNumbers(diffContent: string): DiffLine[] {
  const parsed: DiffLine[] = [];
  const HUNK_RE = /^@@ -(\d+),?\d* \+(\d+),?\d* @@/;
  let oldLineNo = 0;
  let newLineNo = 0;
  let insideHunk = false;
  for (const rawLine of diffContent.split('\n')) {
    const hunk = HUNK_RE.exec(rawLine);
    if (hunk) {
      insideHunk = true;
      // Counters start one *below* the hunk's declared first line because
      // each content line pre-increments its counter before being recorded.
      oldLineNo = parseInt(hunk[1], 10) - 1;
      newLineNo = parseInt(hunk[2], 10) - 1;
      parsed.push({ type: 'hunk', content: rawLine });
      continue;
    }
    // Git headers (---, +++, diff --git, index, rename, mode lines) and any
    // other pre-hunk text are all ignored.
    if (!insideHunk) continue;
    const marker = rawLine[0];
    if (marker === '+') {
      parsed.push({ type: 'add', newLine: ++newLineNo, content: rawLine.slice(1) });
    } else if (marker === '-') {
      parsed.push({ type: 'del', oldLine: ++oldLineNo, content: rawLine.slice(1) });
    } else if (marker === ' ') {
      parsed.push({ type: 'context', oldLine: ++oldLineNo, newLine: ++newLineNo, content: rawLine.slice(1) });
    } else if (marker === '\\') {
      // "\ No newline at end of file"
      parsed.push({ type: 'other', content: rawLine });
    }
    // Lines with any other leading character are silently dropped, matching
    // the behavior for malformed hunks.
  }
  return parsed;
}
interface DiffRendererProps {
  // Unified (git-style) diff text to render.
  diffContent: string;
  // Currently unused by the component — TODO confirm intent before removing.
  filename?: string;
  // Spaces substituted per tab during whitespace normalization.
  tabWidth?: number;
}

const DEFAULT_TAB_WIDTH = 4; // Spaces per tab for normalization

/**
 * Renders a unified diff inside a bordered box with a line-number gutter:
 * additions in green (+), deletions in red (-), context dimmed. The minimum
 * indentation shared by all displayed lines is stripped so deeply nested
 * hunks remain readable; relative nesting is preserved.
 */
const DiffRenderer: React.FC<DiffRendererProps> = ({ diffContent, tabWidth = DEFAULT_TAB_WIDTH }) => {
  if (!diffContent || typeof diffContent !== 'string') {
    return <Text color="yellow">No diff content.</Text>;
  }
  const parsedLines = parseDiffWithLineNumbers(diffContent);
  // 1. Normalize whitespace (replace tabs with spaces) *before* further processing
  const normalizedLines = parsedLines.map(line => ({
    ...line,
    content: line.content.replace(/\t/g, ' '.repeat(tabWidth))
  }));
  // Filter out non-displayable lines (hunk headers and 'other' markers).
  const displayableLines = normalizedLines.filter(l => l.type !== 'hunk' && l.type !== 'other');
  if (displayableLines.length === 0) {
    return (
      <Box borderStyle="round" borderColor="gray" padding={1}>
        <Text dimColor>No changes detected.</Text>
      </Box>
    );
  }
  // Calculate the minimum indentation across all displayable lines.
  let baseIndentation = Infinity; // Start high to find the minimum
  for (const line of displayableLines) {
    // Only consider lines with actual content for indentation calculation
    if (line.content.trim() === '') continue;
    const firstCharIndex = line.content.search(/\S/); // Find index of first non-whitespace char
    const currentIndent = (firstCharIndex === -1) ? 0 : firstCharIndex; // Indent is 0 if no non-whitespace found
    baseIndentation = Math.min(baseIndentation, currentIndent);
  }
  // If baseIndentation remained Infinity (e.g., no displayable lines with content), default to 0
  if (!isFinite(baseIndentation)) {
    baseIndentation = 0;
  }
  return (
    <Box borderStyle="round" borderColor="gray" flexDirection="column">
      {/* Iterate over the lines that should be displayed (already normalized) */}
      {displayableLines.map((line, index) => {
        const key = `diff-line-${index}`;
        let gutterNumStr = '';
        let color: string | undefined = undefined;
        let prefixSymbol = ' ';
        let dim = false;
        switch (line.type) {
          case 'add':
            gutterNumStr = (line.newLine ?? '').toString();
            color = 'green';
            prefixSymbol = '+';
            break;
          case 'del':
            gutterNumStr = (line.oldLine ?? '').toString();
            color = 'red';
            prefixSymbol = '-';
            break;
          case 'context':
            // Show new line number for context lines in gutter
            gutterNumStr = (line.newLine ?? '').toString();
            dim = true;
            prefixSymbol = ' ';
            break;
        }
        // Strip only the *shared minimum* indentation so relative nesting is kept.
        const displayContent = line.content.substring(baseIndentation);
        return (
          <Box key={key} flexDirection="row">
            <Text color="gray">{gutterNumStr} </Text>
            <Text color={color} dimColor={dim}>{prefixSymbol} </Text>
            <Text color={color} dimColor={dim} wrap="wrap">{displayContent}</Text>
          </Box>
        );
      })}
    </Box>
  );
};

export default DiffRenderer;

View File

@ -0,0 +1,24 @@
import React from 'react';
import { Text, Box } from 'ink';
interface ErrorMessageProps {
  text: string;
}

/** Red error line prefixed with a ✕ marker; text wraps beside the gutter. */
const ErrorMessage: React.FC<ErrorMessageProps> = ({ text }) => {
  const prefix = '✕ ';
  return (
    <Box flexDirection="row">
      <Box width={prefix.length}>
        <Text color="red">{prefix}</Text>
      </Box>
      <Box flexGrow={1}>
        <Text wrap="wrap" color="red">{text}</Text>
      </Box>
    </Box>
  );
};

export default ErrorMessage;

View File

@ -0,0 +1,44 @@
import React from 'react';
import { Text, Box } from 'ink';
import { MarkdownRenderer } from '../../utils/MarkdownRenderer.js';
interface GeminiMessageProps {
  text: string;
}

/**
 * A Gemini response row: blue ✦ gutter marker beside the markdown-rendered
 * message body. Null/empty text yields an empty row instead of crashing.
 */
const GeminiMessage: React.FC<GeminiMessageProps> = ({ text }) => {
  const prefix = '✦ ';
  // Guard against null/undefined text before handing it to the renderer.
  const safeText = text || '';
  const renderedBlocks = MarkdownRenderer.render(safeText);
  const isEmpty = !safeText && renderedBlocks.length === 0;
  return (
    <Box flexDirection="row">
      <Box width={prefix.length}>
        <Text color="blue">{prefix}</Text>
      </Box>
      {isEmpty ? (
        <Box flexGrow={1}></Box>
      ) : (
        <Box flexGrow={1} flexDirection="column">
          {renderedBlocks}
        </Box>
      )}
    </Box>
  );
};

export default GeminiMessage;

View File

@ -0,0 +1,24 @@
import React from 'react';
import { Text, Box } from 'ink';
interface InfoMessageProps {
  text: string;
}

/**
 * Renders an informational message in yellow with a fixed-width gutter,
 * mirroring the layout of the other message components.
 */
const InfoMessage: React.FC<InfoMessageProps> = ({ text }) => {
  // NOTE(review): prefix appears to be a bare space — possibly a glyph
  // (e.g. 'ℹ ') lost in transit; confirm against the original source
  // before changing.
  const prefix = ' ';
  return (
    <Box flexDirection="row">
      <Box width={prefix.length}>
        <Text color="yellow">{prefix}</Text>
      </Box>
      <Box flexGrow={1}>
        <Text wrap="wrap" color="yellow">{text}</Text>
      </Box>
    </Box>
  );
};

export default InfoMessage;

View File

@ -0,0 +1,101 @@
import React from 'react';
import { Box, Text, useInput } from 'ink';
import SelectInput from 'ink-select-input';
import { ToolCallConfirmationDetails, ToolEditConfirmationDetails, ToolConfirmationOutcome, ToolExecuteConfirmationDetails } from '../../types.js'; // Adjust path as needed
import { PartListUnion } from '@google/genai';
import DiffRenderer from './DiffRenderer.js';
import { UI_WIDTH } from '../../constants.js';
export interface ToolConfirmationMessageProps {
  /** Details describing the pending tool call awaiting user approval. */
  confirmationDetails: ToolCallConfirmationDetails;
  /** Invoked with the payload to send back once the user decides. */
  onSubmit: (value: PartListUnion) => void;
}

/**
 * Type guard: an edit confirmation carries a defined `fileName`, an execute
 * confirmation does not. Presence of a defined value (not just the key)
 * distinguishes the two variants.
 */
function isEditDetails(props: ToolCallConfirmationDetails): props is ToolEditConfirmationDetails {
  const maybeEdit = props as ToolEditConfirmationDetails;
  return maybeEdit.fileName !== undefined;
}

/** Shape consumed by ink-select-input for the numbered choices. */
interface InternalOption {
  label: string;
  value: ToolConfirmationOutcome;
}
/**
 * Interactive confirmation prompt for a pending tool call.
 *
 * Renders either a diff preview (file edits; DiffRenderer draws its own
 * frame) or the command line about to run (shell execution), followed by a
 * numbered select list. Escape is a shortcut for "No".
 *
 * Note: the `onSubmit` prop is accepted for interface compatibility but the
 * decision is resolved through `confirmationDetails.onConfirm`.
 * BUG FIX: removed the `title` local, which was assigned in both branches
 * but never rendered.
 */
const ToolConfirmationMessage: React.FC<ToolConfirmationMessageProps> = ({ confirmationDetails }) => {
  const { onConfirm } = confirmationDetails;

  // Escape cancels the pending call.
  useInput((_, key) => {
    if (key.escape) {
      onConfirm(ToolConfirmationOutcome.Cancel);
    }
  });

  const handleSelect = (item: InternalOption) => {
    onConfirm(item.value);
  };

  let bodyContent: React.ReactNode | null = null;
  let question: string;
  const options: InternalOption[] = [];

  if (isEditDetails(confirmationDetails)) {
    // File edit: show the proposed diff.
    bodyContent = (
      <DiffRenderer diffContent={confirmationDetails.fileDiff} />
    );
    question = `Apply this change?`;
    options.push(
      { label: '1. Yes, apply change', value: ToolConfirmationOutcome.ProceedOnce },
      { label: "2. Yes, always apply file edits", value: ToolConfirmationOutcome.ProceedAlways },
      { label: '3. No (esc)', value: ToolConfirmationOutcome.Cancel }
    );
  } else {
    // Shell execution: show the command that would run.
    const executionProps = confirmationDetails as ToolExecuteConfirmationDetails;
    bodyContent = (
      <Box flexDirection="column">
        <Box paddingX={1} marginLeft={1}>
          <Text color="cyan">{executionProps.command}</Text>
        </Box>
      </Box>
    );
    question = `Allow execution?`;
    options.push(
      { label: '1. Yes, allow once', value: ToolConfirmationOutcome.ProceedOnce },
      { label: `2. Yes, always allow '${executionProps.rootCommand}' commands`, value: ToolConfirmationOutcome.ProceedAlways },
      { label: '3. No (esc)', value: ToolConfirmationOutcome.Cancel }
    );
  }

  return (
    <Box flexDirection="column" padding={1} minWidth={UI_WIDTH}>
      {/* Body: diff preview or command info */}
      <Box flexGrow={1} flexShrink={1} overflow="hidden" marginBottom={1}>
        {bodyContent}
      </Box>
      {/* Confirmation question */}
      <Box marginBottom={1} flexShrink={0}>
        <Text>{question}</Text>
      </Box>
      {/* Numbered choices */}
      <Box flexShrink={0}>
        <SelectInput items={options} onSelect={handleSelect} />
      </Box>
    </Box>
  );
};

export default ToolConfirmationMessage;

View File

@ -0,0 +1,47 @@
import React from 'react';
import { Box } from 'ink';
import { IndividualToolCallDisplay, ToolCallStatus } from '../../types.js';
import ToolMessage from './ToolMessage.js';
import { PartListUnion } from '@google/genai';
import ToolConfirmationMessage from './ToolConfirmationMessage.js';
interface ToolGroupMessageProps {
  /** Tool calls to render together inside one bordered group. */
  toolCalls: IndividualToolCallDisplay[];
  /** Forwarded to confirmation prompts so answers flow back to the stream. */
  onSubmit: (value: PartListUnion) => void;
}

/**
 * Renders a group of tool calls inside a rounded border: yellow while any
 * call is still pending, blue once all have settled. Calls awaiting user
 * approval additionally render their confirmation prompt.
 */
const ToolGroupMessage: React.FC<ToolGroupMessageProps> = ({ toolCalls, onSubmit }) => {
  const hasPending = toolCalls.some(t => t.status === ToolCallStatus.Pending);
  const borderColor = hasPending ? "yellow" : "blue";
  return (
    <Box
      flexDirection="column"
      borderStyle="round"
      borderColor={borderColor}
    >
      {toolCalls.map((tool) => (
        // callId is the stable React key; placing it on the Fragment is
        // sufficient (BUG FIX: the duplicate key on ToolMessage was removed).
        <React.Fragment key={tool.callId}>
          <ToolMessage
            name={tool.name}
            description={tool.description}
            resultDisplay={tool.resultDisplay}
            status={tool.status}
          />
          {tool.status === ToolCallStatus.Confirming && tool.confirmationDetails && (
            <ToolConfirmationMessage confirmationDetails={tool.confirmationDetails} onSubmit={onSubmit} />
          )}
        </React.Fragment>
      ))}
    </Box>
  );
};

export default ToolGroupMessage;

View File

@ -0,0 +1,53 @@
import React from 'react';
import { Box, Text } from 'ink';
import Spinner from 'ink-spinner';
import { ToolCallStatus } from '../../types.js';
import { ToolResultDisplay } from '../../../tools/ToolResult.js';
import DiffRenderer from './DiffRenderer.js';
import { MarkdownRenderer } from '../../utils/MarkdownRenderer.js';
interface ToolMessageProps {
  name: string;
  description: string;
  resultDisplay: ToolResultDisplay | undefined;
  status: ToolCallStatus;
}

/**
 * One row in a tool group: a status indicator, the tool name/description,
 * and — once the call is settled (Invoked or Canceled) — its result rendered
 * as markdown (string results) or as a diff (object results).
 */
const ToolMessage: React.FC<ToolMessageProps> = ({ name, description, resultDisplay, status }) => {
  const statusIndicatorWidth = 3;
  // A result is shown only for settled calls: non-blank string results, or
  // any object result (file diff). BUG FIX: the previous check called
  // .toString() on object results, which only "worked" because
  // '[object Object]' happens to be non-empty.
  const isSettled = status === ToolCallStatus.Invoked || status === ToolCallStatus.Canceled;
  const hasResult =
    isSettled &&
    resultDisplay !== undefined &&
    (typeof resultDisplay !== 'string' || resultDisplay.trim().length > 0);
  return (
    <Box paddingX={1} paddingY={0} flexDirection="column">
      {/* Row for Status Indicator and Tool Info */}
      <Box minHeight={1}>
        <Box minWidth={statusIndicatorWidth}>
          {status === ToolCallStatus.Pending && <Spinner type="dots" />}
          {/* NOTE(review): the Invoked glyph is empty — possibly a '✓' lost
              in transit; confirm against the original source. */}
          {status === ToolCallStatus.Invoked && <Text color="green"></Text>}
          {status === ToolCallStatus.Confirming && <Text color="blue">?</Text>}
          {status === ToolCallStatus.Canceled && <Text color="red" bold>-</Text>}
        </Box>
        <Box>
          <Text color="blue" wrap="truncate-end" strikethrough={status === ToolCallStatus.Canceled}>
            <Text bold>{name}</Text> <Text color="gray">{description}</Text>
          </Text>
        </Box>
      </Box>
      {hasResult && (
        <Box paddingLeft={statusIndicatorWidth}>
          <Box flexShrink={1} flexDirection="row">
            <Text color="gray"> </Text>
            {typeof resultDisplay === 'string' && <Box flexDirection='column'>{MarkdownRenderer.render(resultDisplay)}</Box>}
            {typeof resultDisplay === 'object' && <DiffRenderer diffContent={resultDisplay.fileDiff} />}
          </Box>
        </Box>
      )}
    </Box>
  );
};

export default ToolMessage;

View File

@ -0,0 +1,24 @@
import React from 'react';
import { Text, Box } from 'ink';
interface UserMessageProps {
text: string;
}
const UserMessage: React.FC<UserMessageProps> = ({ text }) => {
const prefix = '> ';
const prefixWidth = prefix.length;
return (
<Box flexDirection="row">
<Box width={prefixWidth}>
<Text color="gray">{prefix}</Text>
</Box>
<Box flexGrow={1}>
<Text wrap="wrap">{text}</Text>
</Box>
</Box>
);
};
export default UserMessage;

View File

@ -0,0 +1,26 @@
// Width of the ASCII-art banner the UI is sized around.
const EstimatedArtWidth = 59;
const BoxBorderWidth = 1;

/** Horizontal padding applied inside the main bordered box. */
export const BOX_PADDING_X = 1;

/** Overall UI width: art plus padding and border on both sides (~63). */
export const UI_WIDTH = EstimatedArtWidth + 2 * (BOX_PADDING_X + BoxBorderWidth);

/** Phrases cycled through while waiting on the model. */
export const WITTY_LOADING_PHRASES = [
  'Consulting the digital spirits...',
  'Reticulating splines...',
  'Warming up the AI hamsters...',
  'Asking the magic conch shell...',
  'Generating witty retort...',
  'Polishing the algorithms...',
  'Don\'t rush perfection (or my code)...',
  'Brewing fresh bytes...',
  'Counting electrons...',
  'Engaging cognitive processors...',
  'Checking for syntax errors in the universe...',
  'One moment, optimizing humor...',
  'Shuffling punchlines...',
  'Untangling neural nets...',
  'Compiling brilliance...',
];

/** How often the loading phrase rotates (ms). */
export const PHRASE_CHANGE_INTERVAL_MS = 15000;

/** Debounce window for stream-driven UI updates (ms). */
export const STREAM_DEBOUNCE_MS = 100;

View File

@ -0,0 +1,142 @@
import { useState, useRef, useCallback, useEffect } from 'react';
import { useInput } from 'ink';
import { GeminiClient } from '../../core/GeminiClient.js';
import { type Chat, type PartListUnion } from '@google/genai';
import { HistoryItem } from '../types.js';
import { processGeminiStream } from '../../core/geminiStreamProcessor.js';
import { StreamingState } from '../../core/StreamingState.js';
/**
 * Appends a history entry with the given id via the React state setter.
 * Uses the functional update form so concurrent appends never clobber
 * each other.
 */
const addHistoryItem = (
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
  itemData: Omit<HistoryItem, 'id'>,
  id: number
) => {
  setHistory((prevHistory) => {
    const entry = { ...itemData, id } as HistoryItem;
    return prevHistory.concat(entry);
  });
};
/**
 * React hook that owns the Gemini chat session: submits queries, streams
 * responses into the history list, and exposes the streaming state plus any
 * client initialization error.
 */
export const useGeminiStream = (
  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
) => {
  const [streamingState, setStreamingState] = useState<StreamingState>(StreamingState.Idle);
  const [initError, setInitError] = useState<string | null>(null);
  const abortControllerRef = useRef<AbortController | null>(null);
  const currentToolGroupIdRef = useRef<number | null>(null);
  const chatSessionRef = useRef<Chat | null>(null);
  const geminiClientRef = useRef<GeminiClient | null>(null);
  const messageIdCounterRef = useRef(0);

  // Create the Gemini client once on mount.
  useEffect(() => {
    setInitError(null);
    if (!geminiClientRef.current) {
      try {
        geminiClientRef.current = new GeminiClient();
      } catch (error: unknown) {
        const message = error instanceof Error ? error.message : 'Unknown error';
        setInitError(`Failed to initialize client: ${message}`);
      }
    }
  }, []);

  // Escape aborts an in-flight response.
  useInput((_, key) => {
    if (streamingState === StreamingState.Responding && key.escape) {
      abortControllerRef.current?.abort();
    }
  });

  // Monotonically increasing ids anchored to the user message timestamp.
  const getNextMessageId = useCallback((baseTimestamp: number): number => {
    messageIdCounterRef.current += 1;
    return baseTimestamp + messageIdCounterRef.current;
  }, []);

  /**
   * Sends `query` to Gemini and streams the response into history.
   * No-ops while a response is already streaming or for blank string input.
   */
  const submitQuery = useCallback(async (query: PartListUnion) => {
    if (streamingState === StreamingState.Responding) {
      return; // Already streaming; ignore re-entrant submits.
    }
    if (typeof query === 'string' && query.trim().length === 0) {
      return;
    }
    const userMessageTimestamp = Date.now();
    const client = geminiClientRef.current;
    if (!client) {
      setInitError("Gemini client is not available.");
      return;
    }
    if (!chatSessionRef.current) {
      chatSessionRef.current = await client.startChat();
    }
    // Reset per-request state.
    setStreamingState(StreamingState.Responding);
    setInitError(null);
    currentToolGroupIdRef.current = null;
    messageIdCounterRef.current = 0;
    const chat = chatSessionRef.current;
    try {
      if (typeof query === 'string') {
        addHistoryItem(setHistory, { type: 'user', text: query }, userMessageTimestamp);
      } else if (
        // HACK to detect errored function responses: record them in chat
        // history without starting a new model turn.
        typeof query === 'object' &&
        query !== null &&
        !Array.isArray(query) && // Ensure it's a single Part object
        'functionResponse' in query && // Check if it's a function response Part
        query.functionResponse?.response && // Check if response object exists
        'error' in query.functionResponse.response // Check specifically for the 'error' key
      ) {
        const history = chat.getHistory();
        history.push({ role: 'user', parts: [query] });
        return; // `finally` still resets the streaming state below.
      }
      // Prepare for streaming.
      abortControllerRef.current = new AbortController();
      const signal = abortControllerRef.current.signal;
      const stream = client.sendMessageStream(chat, query, signal);
      const addHistoryItemFromStream = (itemData: Omit<HistoryItem, 'id'>, id: number) => {
        addHistoryItem(setHistory, itemData, id);
      };
      const getStreamMessageId = () => getNextMessageId(userMessageTimestamp);
      await processGeminiStream({
        stream,
        signal,
        setHistory,
        submitQuery,
        getNextMessageId: getStreamMessageId,
        addHistoryItem: addHistoryItemFromStream,
        currentToolGroupIdRef,
      });
    } catch (error: unknown) {
      console.error("Error initiating stream:", error);
      // Preserve the original abort check: anything named 'AbortError'
      // (DOMException or SDK-specific error shape) is a user cancel.
      const errName = (error as { name?: unknown } | null)?.name;
      if (errName !== 'AbortError') {
        const message = error instanceof Error ? error.message : String(error);
        addHistoryItem(setHistory, { type: 'error', text: `[Error starting stream: ${message}]` }, getNextMessageId(userMessageTimestamp));
      }
    } finally {
      abortControllerRef.current = null;
      setStreamingState(StreamingState.Idle);
    }
    // BUG FIX: `streamingState` is read by the re-entrancy guard above but
    // was missing from the dependency list (while the unused `initError`
    // was listed), so the guard compared against a stale value.
  }, [streamingState, setStreamingState, setHistory, getNextMessageId]);

  return { streamingState, submitQuery, initError };
};

View File

@ -0,0 +1,53 @@
import { useState, useEffect, useRef } from 'react';
import { WITTY_LOADING_PHRASES, PHRASE_CHANGE_INTERVAL_MS } from '../constants.js';
import { StreamingState } from '../../core/StreamingState.js';
/**
 * Tracks elapsed seconds and cycles witty loading phrases while a Gemini
 * response is streaming. Both timers stop as soon as streaming ends and are
 * cleared on unmount.
 */
export const useLoadingIndicator = (streamingState: StreamingState) => {
  const [elapsedTime, setElapsedTime] = useState(0);
  const [currentLoadingPhrase, setCurrentLoadingPhrase] = useState(WITTY_LOADING_PHRASES[0]);
  const elapsedTimerRef = useRef<NodeJS.Timeout | null>(null);
  const phraseTimerRef = useRef<NodeJS.Timeout | null>(null);
  const phraseIndexRef = useRef<number>(0);

  // Elapsed-time counter: ticks once per second while responding.
  useEffect(() => {
    if (streamingState === StreamingState.Responding) {
      setElapsedTime(0); // Fresh count for every new request.
      elapsedTimerRef.current = setInterval(() => {
        setElapsedTime((prev) => prev + 1);
      }, 1000);
    } else if (elapsedTimerRef.current) {
      clearInterval(elapsedTimerRef.current);
      elapsedTimerRef.current = null;
    }
    // Cleanup on unmount or state change.
    return () => {
      if (elapsedTimerRef.current) {
        clearInterval(elapsedTimerRef.current);
      }
    };
  }, [streamingState]);

  // Phrase rotation: advances round-robin through WITTY_LOADING_PHRASES.
  useEffect(() => {
    if (streamingState !== StreamingState.Responding) {
      if (phraseTimerRef.current) {
        clearInterval(phraseTimerRef.current);
        phraseTimerRef.current = null;
      }
    } else {
      phraseIndexRef.current = 0;
      setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[0]);
      phraseTimerRef.current = setInterval(() => {
        phraseIndexRef.current = (phraseIndexRef.current + 1) % WITTY_LOADING_PHRASES.length;
        setCurrentLoadingPhrase(WITTY_LOADING_PHRASES[phraseIndexRef.current]);
      }, PHRASE_CHANGE_INTERVAL_MS);
    }
    // Cleanup on unmount or state change.
    return () => {
      if (phraseTimerRef.current) {
        clearInterval(phraseTimerRef.current);
      }
    };
  }, [streamingState]);

  return { elapsedTime, currentLoadingPhrase };
};

View File

@ -0,0 +1,62 @@
import { ToolResultDisplay } from "../tools/ToolResult.js";
/** Lifecycle states of a tool call as shown in the UI. */
export enum ToolCallStatus {
  Pending,     // Queued or executing; spinner shown.
  Invoked,     // Finished executing.
  Confirming,  // Waiting on user approval.
  Canceled,    // Rejected or aborted by the user.
}

/** Raw event describing a tool call state change in the stream. */
export interface ToolCallEvent {
  type: 'tool_call';
  status: ToolCallStatus;
  callId: string; // Correlates request/response parts.
  name: string;
  args: Record<string, any>;
  resultDisplay: ToolResultDisplay | undefined;
  confirmationDetails: ToolCallConfirmationDetails | undefined;
}

/** Display-ready snapshot of one tool call within a tool group. */
export interface IndividualToolCallDisplay {
  callId: string;
  name: string;
  description: string;
  resultDisplay: ToolResultDisplay | undefined;
  status: ToolCallStatus;
  confirmationDetails: ToolCallConfirmationDetails | undefined;
}

/** Fields shared by every history entry. */
export interface HistoryItemBase {
  id: number;
  text?: string; // Text content for user/gemini/info/error messages
}

/** Discriminated union of everything that can appear in chat history. */
export type HistoryItem = HistoryItemBase & (
  | { type: 'user'; text: string }
  | { type: 'gemini'; text: string }
  | { type: 'info'; text: string }
  | { type: 'error'; text: string }
  | { type: 'tool_group'; tools: IndividualToolCallDisplay[]; }
);

/** Base confirmation payload; `onConfirm` resolves the user's decision. */
export interface ToolCallConfirmationDetails {
  title: string;
  onConfirm: (outcome: ToolConfirmationOutcome) => Promise<void>;
}

/** Confirmation details for a file edit (distinguished by `fileName`). */
export interface ToolEditConfirmationDetails extends ToolCallConfirmationDetails {
  fileName: string;
  fileDiff: string;
}

/** Confirmation details for a shell command execution. */
export interface ToolExecuteConfirmationDetails extends ToolCallConfirmationDetails {
  command: string;     // Full command line to run.
  rootCommand: string; // First token; scopes "always allow" approvals.
  description: string;
}

/** User's answer to a confirmation prompt. */
export enum ToolConfirmationOutcome {
  ProceedOnce,
  ProceedAlways,
  Cancel,
}

View File

@ -0,0 +1,249 @@
import React from 'react';
import { Text, Box } from 'ink';
/**
 * A utility class to render a subset of Markdown into Ink components.
 * Handles H1-H4, Lists (ul/ol, no nesting), Code Blocks, horizontal rules,
 * and inline styles (bold, italic, strikethrough, code, links, underline).
 */
export class MarkdownRenderer {
  /**
   * Renders INLINE markdown elements using an iterative approach.
   * Supports: **bold**, *italic*, _italic_, ~~strike~~, [link](url), `code`, ``code``, <u>underline</u>
   * @param text The string segment to parse for inline styles.
   * @returns An array of React nodes (Text components or strings).
   */
  private static _renderInline(text: string): React.ReactNode[] {
    const nodes: React.ReactNode[] = [];
    let lastIndex = 0;
    // One alternation per supported inline construct; bodies are non-greedy.
    const inlineRegex = /(\*\*.*?\*\*|\*.*?\*|_.*?_|~~.*?~~|\[.*?\]\(.*?\)|`+.+?`+|<u>.*?<\/u>)/g;
    let match;
    while ((match = inlineRegex.exec(text)) !== null) {
      // 1. Add plain text before the match
      if (match.index > lastIndex) {
        nodes.push(<Text key={`t-${lastIndex}`}>{text.slice(lastIndex, match.index)}</Text>);
      }
      const fullMatch = match[0];
      let renderedNode: React.ReactNode = null;
      const key = `m-${match.index}`; // Base key for matched part
      // 2. Determine type of match and render accordingly
      try {
        if (fullMatch.startsWith('**') && fullMatch.endsWith('**') && fullMatch.length > 4) {
          renderedNode = <Text key={key} bold>{fullMatch.slice(2, -2)}</Text>;
        } else if (((fullMatch.startsWith('*') && fullMatch.endsWith('*')) || (fullMatch.startsWith('_') && fullMatch.endsWith('_'))) && fullMatch.length > 2) {
          renderedNode = <Text key={key} italic>{fullMatch.slice(1, -1)}</Text>;
        } else if (fullMatch.startsWith('~~') && fullMatch.endsWith('~~') && fullMatch.length > 4) {
          renderedNode = <Text key={key} strikethrough>{fullMatch.slice(2, -2)}</Text>;
        } else if (fullMatch.startsWith('`') && fullMatch.endsWith('`') && fullMatch.length > 1) {
          // Code: strip a matching pair of (possibly multiple) backticks.
          const codeMatch = fullMatch.match(/^(`+)(.+?)\1$/s);
          if (codeMatch && codeMatch[2]) {
            renderedNode = <Text key={key} color="yellow">{codeMatch[2]}</Text>;
          } else { // Fallback for simple or non-matching cases
            renderedNode = <Text key={key} color="yellow">{fullMatch.slice(1, -1)}</Text>;
          }
        } else if (fullMatch.startsWith('[') && fullMatch.includes('](') && fullMatch.endsWith(')')) {
          // Link: render text followed by the URL in blue.
          const linkMatch = fullMatch.match(/\[(.*?)\]\((.*?)\)/);
          if (linkMatch) {
            const linkText = linkMatch[1];
            const url = linkMatch[2];
            renderedNode = (
              <Text key={key}>
                {linkText}
                <Text color="blue"> ({url})</Text>
              </Text>
            );
          }
        } else if (fullMatch.startsWith('<u>') && fullMatch.endsWith('</u>') && fullMatch.length > 6) {
          // Underline tag: slice(3, -4) removes <u> and </u>.
          renderedNode = <Text key={key} underline>{fullMatch.slice(3, -4)}</Text>;
        }
      } catch (e) {
        // In case of regex or slicing errors, fall back to literal rendering.
        console.error("Error parsing inline markdown part:", fullMatch, e);
        renderedNode = null; // Ensure fallback below is used
      }
      // 3. Add the rendered node or the literal text if parsing failed
      nodes.push(renderedNode ?? <Text key={key}>{fullMatch}</Text>);
      lastIndex = inlineRegex.lastIndex; // Move index past the current match
    }
    // 4. Add any remaining plain text after the last match
    if (lastIndex < text.length) {
      nodes.push(<Text key={`t-${lastIndex}`}>{text.slice(lastIndex)}</Text>);
    }
    // Filter out potential nulls if any error occurred without fallback
    return nodes.filter(node => node !== null);
  }

  /**
   * Renders a fenced code block inside a rounded gray border, with the
   * language label (when present) dimmed on the first row.
   */
  private static _renderCodeBlock(key: string, content: string[], lang: string | null): React.ReactNode {
    return (
      <Box key={key} borderStyle="round" paddingX={1} borderColor="gray" flexDirection="column">
        {lang && <Text dimColor> {lang}</Text>}
        {/* Render each line preserving whitespace (within Text component) */}
        {content.map((line, idx) => (
          <Text key={idx}>{line}</Text>
        ))}
      </Box>
    );
  }

  /**
   * Renders a single list item (ordered or unordered) with a fixed-width
   * marker column so wrapped text stays aligned.
   */
  private static _renderListItem(key: string, text: string, type: 'ul' | 'ol', marker: string): React.ReactNode {
    const renderedText = MarkdownRenderer._renderInline(text); // Allow inline styles in list items
    // BUG FIX: the ordered-list regex captures only the digits, so the
    // rendered prefix was "1 " instead of the intended "1. " (both ternary
    // branches were previously identical).
    const prefix = type === 'ol' ? `${marker}. ` : `${marker} `;
    const prefixWidth = prefix.length;
    return (
      <Box key={key} paddingLeft={1} flexDirection="row">
        <Box width={prefixWidth}>
          <Text>{prefix}</Text>
        </Box>
        <Box flexGrow={1}>
          <Text wrap="wrap">{renderedText}</Text>
        </Box>
      </Box>
    );
  }

  /**
   * Renders a full markdown string, handling block elements (headers, lists,
   * code blocks, horizontal rules) and applying inline styles. Main entry point.
   * @param text The full markdown string to render.
   * @returns An array of React nodes representing markdown blocks.
   */
  public static render(text: string): React.ReactNode[] {
    if (!text) return [];
    const lines = text.split('\n');
    // Regexes for block elements
    const headerRegex = /^ *(#{1,4}) +(.*)/;
    const codeFenceRegex = /^ *(`{3,}|~{3,}) *(\S*?) *$/; // ```lang or ``` or ~~~
    const ulItemRegex = /^ *([-*+]) +(.*)/; // Unordered list item, captures bullet and text
    const olItemRegex = /^ *(\d+)\. +(.*)/; // Ordered list item, captures number and text
    const hrRegex = /^ *([-*_] *){3,} *$/; // Horizontal rule
    const contentBlocks: React.ReactNode[] = [];
    // Cross-line parser state for fenced code blocks.
    // NOTE: the original also tracked an `inListType` flag that was written
    // but never read; it has been removed.
    let inCodeBlock = false;
    let codeBlockContent: string[] = [];
    let codeBlockLang: string | null = null;
    let codeBlockFence = ''; // The opening fence (``` or ~~~), used to match the close
    lines.forEach((line, index) => {
      const key = `line-${index}`;
      // --- State 1: Inside a Code Block ---
      if (inCodeBlock) {
        const fenceMatch = line.match(codeFenceRegex);
        // A closing fence must use the same character and be at least as long.
        if (fenceMatch && fenceMatch[1].startsWith(codeBlockFence[0]) && fenceMatch[1].length >= codeBlockFence.length) {
          // End of code block - render it
          contentBlocks.push(MarkdownRenderer._renderCodeBlock(key, codeBlockContent, codeBlockLang));
          inCodeBlock = false;
          codeBlockContent = [];
          codeBlockLang = null;
          codeBlockFence = '';
        } else {
          // Accumulate the line into the current code block.
          codeBlockContent.push(line);
        }
        return; // Process next line
      }
      // --- State 2: Not Inside a Code Block ---
      const codeFenceMatch = line.match(codeFenceRegex);
      const headerMatch = line.match(headerRegex);
      const ulMatch = line.match(ulItemRegex);
      const olMatch = line.match(olItemRegex);
      const hrMatch = line.match(hrRegex);
      if (codeFenceMatch) {
        inCodeBlock = true;
        codeBlockFence = codeFenceMatch[1];
        codeBlockLang = codeFenceMatch[2] || null;
      } else if (hrMatch) {
        // Horizontal rule rendered as a dimmed dashed line.
        contentBlocks.push(<Box key={key}><Text dimColor>---</Text></Box>);
      } else if (headerMatch) {
        const level = headerMatch[1].length;
        const headerText = headerMatch[2];
        const renderedHeaderText = MarkdownRenderer._renderInline(headerText);
        let headerNode: React.ReactNode = null;
        switch (level) {
          case 1: headerNode = <Text bold color="cyan">{renderedHeaderText}</Text>; break;
          case 2: headerNode = <Text bold color="blue">{renderedHeaderText}</Text>; break;
          case 3: headerNode = <Text bold>{renderedHeaderText}</Text>; break;
          case 4: headerNode = <Text italic color="gray">{renderedHeaderText}</Text>; break;
        }
        if (headerNode) contentBlocks.push(<Box key={key}>{headerNode}</Box>);
      } else if (ulMatch) {
        // ulMatch[1] is the bullet (*, - or +), ulMatch[2] the item text.
        contentBlocks.push(MarkdownRenderer._renderListItem(key, ulMatch[2], 'ul', ulMatch[1]));
      } else if (olMatch) {
        // olMatch[1] is the number (without dot), olMatch[2] the item text.
        contentBlocks.push(MarkdownRenderer._renderListItem(key, olMatch[2], 'ol', olMatch[1]));
      } else {
        // --- Regular line (paragraph or blank) ---
        const renderedLine = MarkdownRenderer._renderInline(line);
        if (renderedLine.length > 0 || line.length > 0) {
          // Lines with content (including whitespace-only lines).
          contentBlocks.push(
            <Box key={key}>
              <Text wrap="wrap">{renderedLine}</Text>
            </Box>
          );
        } else if (line.trim().length === 0) {
          // Blank line: one-row spacer between blocks.
          // BUG FIX: dropped the unused `previousBlock` local and the
          // always-true `!inCodeBlock` guard (this branch is unreachable
          // while inside a code block).
          if (contentBlocks.length > 0) {
            contentBlocks.push(<Box key={key} height={1} />);
          }
        }
      }
    });
    // Handle unclosed code block at the end of the input
    if (inCodeBlock) {
      contentBlocks.push(MarkdownRenderer._renderCodeBlock(`line-eof`, codeBlockContent, codeBlockLang));
    }
    return contentBlocks;
  }
}

View File

@ -0,0 +1,325 @@
import { promises as fs } from 'fs';
import { SchemaUnion, Type } from "@google/genai"; // Assuming these types exist
import { GeminiClient } from "../core/GeminiClient.js"; // Assuming this path
import { exec } from 'child_process'; // Needed for Windows process check
import { promisify } from 'util'; // To promisify exec
// Promisify child_process.exec for easier async/await usage
// Promisify child_process.exec for easier async/await usage (used by the
// platform-specific process liveness checks).
const execAsync = promisify(exec);

/**
 * Minimal interface the analyzer needs from an AI client, allowing the
 * default GeminiClient to be replaced (e.g. in tests).
 */
export interface AiClient {
  generateJson(
    prompt: any[], // Keep flexible or define a stricter prompt structure type
    schema: SchemaUnion
  ): Promise<any>; // Ideally, specify the expected JSON structure TAnalysisResult | TAnalysisFailure
}

// Identifier for the background process (e.g., PID)
// Using `unknown` allows more flexibility than `object` while still being type-safe
export type ProcessHandle = number | string | unknown;

/** Structure expected from a successful LLM analysis call. */
export interface AnalysisResult {
  summary: string;
  inferredStatus: 'Running' | 'SuccessReported' | 'ErrorReported' | 'Unknown';
}

/** Structure returned when the LLM analysis itself fails. */
export interface AnalysisFailure {
  error: string;
  inferredStatus: 'AnalysisFailed';
}
/**
 * Type guard distinguishing a failed analysis from a successful one by its
 * 'AnalysisFailed' discriminant.
 */
function isAnalysisFailure(result: AnalysisResult | AnalysisFailure): result is AnalysisFailure {
  const candidate = result as AnalysisFailure;
  return candidate.inferredStatus === 'AnalysisFailed';
}
/** Final outcome after polling is complete (or failed / timed out). */
export interface FinalAnalysisOutcome {
  status: string; // e.g., 'SuccessReported', 'ErrorReported', 'ProcessEnded_SuccessReported', 'TimedOut_Running', 'AnalysisFailed'
  summary: string; // Final summary or error message
}
export class BackgroundTerminalAnalyzer {
  // AI client used for log analysis; injectable for testing.
  private ai: AiClient;
  // Polling parameters, all configurable via the constructor options bag.
  private pollIntervalMs: number;
  private maxAttempts: number;
  private initialDelayMs: number;

  /**
   * @param aiClient Optional AI client; defaults to a new GeminiClient.
   * @param options Polling configuration overrides (interval, attempt
   *                count, initial delay before the first check).
   */
  constructor(
    aiClient?: AiClient, // Allow injecting AiClient, default to GeminiClient
    options: {
      pollIntervalMs?: number,
      maxAttempts?: number,
      initialDelayMs?: number
    } = {} // Provide default options
  ) {
    this.ai = aiClient || new GeminiClient(); // Use injected client or default
    this.pollIntervalMs = options.pollIntervalMs ?? 5000; // Default 5 seconds
    this.maxAttempts = options.maxAttempts ?? 6; // Default 6 attempts (approx 30s total)
    this.initialDelayMs = options.initialDelayMs ?? 500; // Default 0.5s initial delay
  }
/**
* Polls the output of a background process using an LLM
* until a conclusive status is determined or timeout occurs.
* @param pid The handle/identifier of the background process (typically PID number).
* @param tempStdoutFilePath Path to the temporary file capturing stdout.
* @param tempStderrFilePath Path to the temporary file capturing stderr.
* @param command The command string that was executed (for context in prompts).
* @returns A promise resolving to the final analysis outcome.
*/
public async analyze(
pid: ProcessHandle,
tempStdoutFilePath: string,
tempStderrFilePath: string,
command: string
): Promise<FinalAnalysisOutcome> {
// --- Initial Delay ---
// Wait briefly before the first check to allow the process to initialize
// and potentially write initial output.
await new Promise(resolve => setTimeout(resolve, this.initialDelayMs));
let attempts = 0;
let lastAnalysisResult: AnalysisResult | AnalysisFailure | null = null;
while (attempts < this.maxAttempts) {
attempts++;
let currentStdout: string = '';
let currentStderr: string = '';
// --- Robust File Reading ---
try {
currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8');
} catch (error: any) {
// If file doesn't exist yet or isn't readable, treat as empty, but log warning
if (error.code !== 'ENOENT') {
console.warn(`Attempt ${attempts}: Failed to read stdout file ${tempStdoutFilePath}: ${error.message}`);
}
}
try {
currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8');
} catch (error: any) {
if (error.code !== 'ENOENT') {
console.warn(`Attempt ${attempts}: Failed to read stderr file ${tempStderrFilePath}: ${error.message}`);
}
}
// --- Process Status Check ---
let isRunning = false;
try {
// Check if process is running *before* the final analysis if it seems to have ended
isRunning = await this.isProcessRunning(pid);
if (!isRunning) {
// Reread files one last time in case output was written just before exit
try { currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8'); } catch {}
try { currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8'); } catch {}
lastAnalysisResult = await this.analyzeOutputWithLLM(currentStdout, currentStderr, command);
if (isAnalysisFailure(lastAnalysisResult)) {
return { status: 'ProcessEnded_AnalysisFailed', summary: `Process ended. Final analysis failed: ${lastAnalysisResult.error}` };
}
// Append ProcessEnded to the status determined by the final analysis
return { status: 'ProcessEnded_' + lastAnalysisResult.inferredStatus, summary: `Process ended. Final analysis summary: ${lastAnalysisResult.summary}` };
}
} catch (procCheckError: any) {
// Log the error but allow polling to continue, as log analysis might still be useful
console.warn(`Could not check process status for PID ${pid} on attempt ${attempts}: ${procCheckError.message}`);
// Decide if you want to bail out here or continue analysis based on logs only
// For now, we continue.
}
// --- LLM Analysis ---
lastAnalysisResult = await this.analyzeOutputWithLLM(currentStdout, currentStderr, command);
if (isAnalysisFailure(lastAnalysisResult)) {
console.error(`LLM Analysis failed for PID ${pid} on attempt ${attempts}:`, lastAnalysisResult.error);
// Stop polling on analysis failure, returning the specific failure status
return { status: lastAnalysisResult.inferredStatus, summary: lastAnalysisResult.error };
}
// --- Exit Conditions ---
if (lastAnalysisResult.inferredStatus === 'SuccessReported' || lastAnalysisResult.inferredStatus === 'ErrorReported') {
return { status: lastAnalysisResult.inferredStatus, summary: lastAnalysisResult.summary };
}
// Heuristic: If the process seems stable and 'Running' after several checks,
// return that status without waiting for the full timeout. Adjust threshold as needed.
const runningExitThreshold = Math.floor(this.maxAttempts / 3) + 1; // e.g., exit after attempt 4 if maxAttempts is 6
if (attempts >= runningExitThreshold && lastAnalysisResult.inferredStatus === 'Running') {
return { status: lastAnalysisResult.inferredStatus, summary: lastAnalysisResult.summary };
}
// --- Wait before next poll ---
if (attempts < this.maxAttempts) {
await new Promise(resolve => setTimeout(resolve, this.pollIntervalMs));
}
} // End while loop
// --- Timeout Condition ---
console.warn(`Polling timed out for PID ${pid} after ${this.maxAttempts} attempts.`);
// Determine final status based on the last successful analysis (if any)
const finalStatus = (lastAnalysisResult && !isAnalysisFailure(lastAnalysisResult))
? `TimedOut_${lastAnalysisResult.inferredStatus}` // e.g., TimedOut_Running
: 'TimedOut_AnalysisFailed'; // If last attempt failed or no analysis succeeded
const finalSummary = (lastAnalysisResult && !isAnalysisFailure(lastAnalysisResult))
? `Polling timed out after ${this.maxAttempts} attempts. Last known summary: ${lastAnalysisResult.summary}`
: (lastAnalysisResult && isAnalysisFailure(lastAnalysisResult))
? `Polling timed out; last analysis attempt failed: ${lastAnalysisResult}`
: `Polling timed out after ${this.maxAttempts} attempts without any successful analysis.`;
return { status: finalStatus, summary: finalSummary };
}
// --- Actual Implementation of isProcessRunning ---
/**
* Checks if the background process is still running using OS-specific methods.
* @param pid Process handle/identifier (expects a number for standard checks).
* @returns True if running, false otherwise.
* @throws Error if the check itself fails critically (e.g., command not found, permissions).
*/
private async isProcessRunning(pid: ProcessHandle): Promise<boolean> {
if (typeof pid !== 'number' || !Number.isInteger(pid) || pid <= 0) {
console.warn(`isProcessRunning: Invalid PID provided (${pid}). Assuming not running.`);
return false;
}
try {
if (process.platform === 'win32') {
// Windows: Use tasklist command
const command = `tasklist /FI "PID eq ${pid}" /NH`; // /NH for no header
const { stdout } = await execAsync(command);
// Check if the output contains the process information (it will have the image name if found)
return stdout.toLowerCase().includes('.exe'); // A simple check, adjust if needed
} else {
// Linux/macOS/Unix-like: Use kill -0 signal
// process.kill sends signal 0 to check existence without killing
process.kill(pid, 0);
return true; // If no error is thrown, process exists
}
} catch (error: any) {
if (error.code === 'ESRCH') {
// ESRCH: Standard error code for "No such process" on Unix-like systems
return false;
} else if (process.platform === 'win32' && error.message.includes('No tasks are running')) {
// tasklist specific error when PID doesn't exist
return false;
} else {
// Other errors (e.g., EPERM - permission denied) mean we couldn't determine status.
// Re-throwing might be appropriate depending on desired behavior.
// Here, we log it and cautiously return true, assuming it *might* still be running.
console.warn(`isProcessRunning(${pid}) encountered error: ${error.message}. Assuming process might still exist.`);
// Or you could throw the error: throw new Error(`Failed to check process status for PID ${pid}: ${error.message}`);
return true; // Cautious assumption
}
}
}
// --- LLM Analysis Method ---
/**
 * Interprets a background command's captured stdout/stderr with the LLM.
 *
 * Sends the tail (last 2000 chars) of each stream to `this.ai.generateJson`
 * together with a schema constraining the reply to a markdown `summary` plus
 * an `inferredStatus` enum, then validates the response shape before
 * returning it.
 *
 * @param stdout Captured standard output so far (may be empty).
 * @param stderr Captured standard error so far (may be empty).
 * @param command The original shell command; included in the prompt for context.
 * @returns The validated AnalysisResult, or an AnalysisFailure with
 *          `inferredStatus: 'AnalysisFailed'` when the call or response
 *          validation fails. This method never throws.
 */
private async analyzeOutputWithLLM(
  stdout: string,
  stderr: string,
  command: string
): Promise<AnalysisResult | AnalysisFailure> {
  try {
    // Structured-output schema enforced on the model response.
    const schema: SchemaUnion = {
      type: Type.OBJECT,
      properties: {
        summary: {
          type: Type.STRING,
          description: "A concise interpretation of significant events, progress, final results, or errors found in the process's stdout and stderr. Summarizes what the logs indicate happened. Should be formatted as markdown."
        },
        inferredStatus: {
          type: Type.STRING,
          description: "The inferred status based *only* on analyzing the provided log content. Possible values: 'Running' (logs show ongoing activity without completion/error), 'SuccessReported' (logs indicate successful completion or final positive result), 'ErrorReported' (logs indicate an error or failure), 'Unknown' (status cannot be clearly determined from the log content).",
          enum: ['Running', 'SuccessReported', 'ErrorReported', 'Unknown']
        }
      },
      required: ['summary', 'inferredStatus']
    };
    // NOTE: template-literal lines below are part of the prompt string; their
    // layout is intentional and must not be re-indented.
    const prompt = `**Analyze Background Process Logs**
**Context:** A command (\`${command}\`) was executed in the background. You are analyzing the standard output (stdout) and standard error (stderr) collected so far to understand its progress and outcome. This analysis will be used to inform a user about what the command did.
**Input:**
* **Command:** \`${command}\`
* **Stdout:**
\`\`\`
${stdout.slice(-2000) || '(empty)'} ${stdout.length > 2000 ? '\n... (truncated)' : ''}
\`\`\`
* **Stderr:**
\`\`\`
${stderr.slice(-2000) || '(empty)'} ${stderr.length > 2000 ? '\n... (truncated)' : ''}
\`\`\`
**Task:**
Based *only* on the provided stdout and stderr:
1. **Interpret and Summarize:** Do *not* simply repeat the logs. Analyze the content and provide a concise summary describing the significant actions, results, progress, or errors reported by the command. If logs are empty, state that no output was captured. Summaries should be formatted as markdown. Focus on the most recent or conclusive information if logs are long.
2. **Infer Current Status:** Based *only* on the log content, infer the likely status of the command's execution as reflected *in the logs*. Choose the most appropriate status from the options defined in the schema (\`Running\`, \`SuccessReported\`, \`ErrorReported\`, \`Unknown\`). For example:
    * If logs show ongoing activity or progress messages without clear completion or error signals, use \`Running\`.
    * If logs contain explicit messages indicating successful completion or the final expected output of a successful run, use \`SuccessReported\`.
    * If logs contain error messages, stack traces, or failure indications, use \`ErrorReported\`.
    * If the logs provide insufficient information to determine a clear status (e.g., empty logs, vague messages), use \`Unknown\`.
    * If dealing with a node server, the second the port has been shown the server is considered booted, use \`SuccessReported\`.
    * *Note: This status reflects the log content, not necessarily the absolute real-time state of the OS process.*
3. **Format Output:** Return the results as a JSON object adhering strictly to the following schema:
\`\`\`json
${JSON.stringify({ // Generate the schema JSON string for the prompt context
  type: "object",
  properties: {
    summary: { type: "string", description: "Concise markdown summary of log interpretation." },
    inferredStatus: { type: "string", enum: ["Running", "SuccessReported", "ErrorReported", "Unknown"], description: "Status inferred *only* from log content." }
  },
  required: ["summary", "inferredStatus"]
}, null, 2)}
\`\`\`
**Instructions:**
* The \`summary\` must be an interpretation of the logs, focusing on key outcomes or activities. Prioritize recent events if logs are extensive.
* The \`inferredStatus\` should reflect the most likely state *deduced purely from the log text provided*. Ensure it is one of the specified enum values.`;
    const response = await this.ai.generateJson([{ role: "user", parts: [{ text: prompt }] }], schema);
    // --- Enhanced Validation ---
    // The model can return malformed JSON-mode output; verify the shape before casting.
    if (typeof response !== 'object' || response === null) {
      throw new Error(`LLM returned non-object response: ${JSON.stringify(response)}`);
    }
    if (typeof response.summary !== 'string' || response.summary.trim() === '') {
      // Ensure summary is a non-empty string; treat anything else as a failed analysis.
      console.warn("LLM response validation warning: 'summary' field is missing, empty or not a string. Raw response:", response);
      throw new Error(`LLM response missing or invalid 'summary'. Got: ${JSON.stringify(response.summary)}`);
    }
    if (!['Running', 'SuccessReported', 'ErrorReported', 'Unknown'].includes(response.inferredStatus)) {
      // Reject statuses outside the schema enum rather than propagating them.
      console.warn(`LLM response validation warning: 'inferredStatus' is invalid ('${response.inferredStatus}'). Raw response:`, response);
      throw new Error(`LLM returned invalid 'inferredStatus': ${JSON.stringify(response.inferredStatus)}`);
    }
    return response as AnalysisResult; // Cast after validation
  } catch (error: any) {
    // Any failure (network, schema, validation) is converted into an
    // AnalysisFailure result so the polling loop can decide how to proceed.
    console.error(`LLM analysis call failed for command "${command}":`, error);
    const errorMessage = error instanceof Error ? error.message : String(error);
    return {
      error: `LLM analysis call encountered an error: ${errorMessage}`,
      inferredStatus: 'AnalysisFailed'
    };
  }
}
}

View File

@ -0,0 +1,349 @@
import * as fs from 'fs/promises';
import * as path from 'path';
// Display/traversal limits and defaults for folder-structure rendering.
const MAX_ITEMS = 200; // Default cap on total files + folders shown.
const TRUNCATION_INDICATOR = '...'; // Marker rendered where content was omitted or ignored.
const DEFAULT_IGNORED_FOLDERS = new Set(['node_modules', '.git', 'dist']); // Never recursed into.
// --- Interfaces ---
/** Options for customizing folder structure retrieval. */
interface FolderStructureOptions {
  /** Maximum number of files and folders combined to display. Defaults to 200. */
  maxItems?: number;
  /** Set of folder names to ignore completely. Case-sensitive. */
  ignoredFolders?: Set<string>;
  /** Optional regex to filter included files by name. */
  fileIncludePattern?: RegExp;
}
/**
 * FolderStructureOptions with defaults applied. fileIncludePattern stays
 * optional because there is no meaningful default pattern (absent = include all).
 */
type MergedFolderStructureOptions = Required<Omit<FolderStructureOptions, 'fileIncludePattern'>> & {
  fileIncludePattern?: RegExp;
};
/** Represents the full, unfiltered information about a folder and its contents. */
interface FullFolderInfo {
  name: string; // Base name of the folder.
  path: string; // Absolute path to the folder.
  files: string[]; // Immediate child file names (after fileIncludePattern filtering).
  subFolders: FullFolderInfo[]; // Immediate child folders; ignored folders appear as unexplored leaves.
  totalChildren: number; // Total files + subfolders recursively (ignored folders count as 1, unexplored).
  totalFiles: number; // Total files recursively (0 inside ignored folders).
  isIgnored?: boolean; // Flag to easily identify ignored folders later.
}
/** Represents the potentially truncated structure used for display. */
interface ReducedFolderNode {
  name: string; // Folder name.
  isRoot?: boolean; // True only for the top-level node (its own name is not rendered).
  files: string[]; // File names; may end with the truncation indicator '...'.
  subFolders: ReducedFolderNode[]; // Subfolders; may be truncated.
  hasMoreFiles?: boolean; // Indicates if files were truncated for this specific folder.
  hasMoreSubfolders?: boolean; // Indicates if subfolders were truncated for this specific folder.
}
// --- Helper Functions ---
/**
 * Recursively walks a directory tree without truncation. Folders named in
 * options.ignoredFolders are recorded as unexplored leaf nodes (isIgnored)
 * and never recursed into.
 * @param folderPath The absolute path to the folder.
 * @param options Configuration options (defaults already applied).
 * @returns A promise resolving to the FullFolderInfo, or null when the
 *          directory cannot be read (EACCES/ENOENT).
 */
async function readFullStructure(
  folderPath: string,
  options: MergedFolderStructureOptions
): Promise<FullFolderInfo | null> {
  const files: string[] = [];
  const subFolders: FullFolderInfo[] = [];
  try {
    const entries = await fs.readdir(folderPath, { withFileTypes: true });
    // Directories first, mirroring the display order used downstream.
    for (const entry of entries) {
      if (!entry.isDirectory()) continue;
      const childPath = path.join(folderPath, entry.name);
      if (options.ignoredFolders.has(entry.name)) {
        // Record the ignored folder as a leaf; its contents are never explored.
        subFolders.push({
          name: entry.name,
          path: childPath,
          files: [],
          subFolders: [],
          totalChildren: 0,
          totalFiles: 0,
          isIgnored: true,
        });
        continue;
      }
      const child = await readFullStructure(childPath, options);
      // Keep folders that have content, or that are explicit ignored markers.
      if (child && (child.totalChildren > 0 || child.files.length > 0 || child.isIgnored)) {
        subFolders.push(child);
      }
    }
    // Then files, honoring the optional include filter.
    for (const entry of entries) {
      if (entry.isFile() && (!options.fileIncludePattern || options.fileIncludePattern.test(entry.name))) {
        files.push(entry.name);
      }
    }
  } catch (error: any) {
    if (error.code === 'EACCES' || error.code === 'ENOENT') {
      console.warn(`Warning: Could not read directory ${folderPath}: ${error.message}`);
      return null;
    }
    throw error;
  }
  // Totals: ignored folders contribute 0 here (never explored) but count as
  // one child item in their parent via subFolders.length.
  const totalFiles = files.length + subFolders.reduce((sum, sf) => sum + sf.totalFiles, 0);
  const totalChildren = files.length + subFolders.length + subFolders.reduce((sum, sf) => sum + sf.totalChildren, 0);
  return {
    name: path.basename(folderPath),
    path: folderPath,
    files,
    subFolders,
    totalChildren,
    totalFiles,
    isIgnored: false,
  };
}
/**
 * Reduces the full folder structure to at most maxItems entries using a
 * breadth-first walk, so shallow entries are preferred over deep ones.
 * Explicitly ignored folders are rendered as a name plus a truncation marker.
 * @param fullInfo The complete folder structure info.
 * @param maxItems The maximum number of items (files + folders) to include.
 * @param ignoredFolders The set of folder names ignored during the read phase.
 *        NOTE(review): currently unused — ignored folders are detected via
 *        their isIgnored flag; parameter kept for interface stability.
 * @returns The root node of the reduced structure.
 */
function reduceStructure(
  fullInfo: FullFolderInfo,
  maxItems: number,
  ignoredFolders: Set<string>
): ReducedFolderNode {
  const rootReducedNode: ReducedFolderNode = { name: fullInfo.name, files: [], subFolders: [], isRoot: true };
  // BFS queue pairing each original folder with its reduced counterpart.
  // The root itself does not count towards the limit.
  const queue: Array<{ original: FullFolderInfo; reduced: ReducedFolderNode }> = [];
  queue.push({ original: fullInfo, reduced: rootReducedNode });
  let itemCount = 0; // Files + folders admitted to the reduced structure.
  while (queue.length > 0) {
    const { original: originalFolder, reduced: reducedFolder } = queue.shift()!;
    // Ignored folders are never queued, but guard defensively.
    if (originalFolder.isIgnored) {
      continue;
    }
    // --- Files ---
    for (const file of originalFolder.files) {
      // Check limit *before* adding the file.
      if (itemCount >= maxItems) {
        // Budget exhausted: mark truncation once and stop adding files here.
        reducedFolder.files.push(TRUNCATION_INDICATOR);
        reducedFolder.hasMoreFiles = true;
        break;
      }
      reducedFolder.files.push(file);
      itemCount++;
    }
    // --- Subfolders ---
    for (const subFolder of originalFolder.subFolders) {
      itemCount++; // The folder itself consumes one slot.
      if (itemCount > maxItems) {
        // Budget exhausted: add a single placeholder and stop scanning the
        // remaining siblings. (Fix: the previous 'continue' contradicted the
        // stated intent to stop — it kept looping, skipped ignored-folder
        // handling for later siblings, and inflated itemCount with folders
        // that were never rendered.)
        const truncatedSubfolderNode: ReducedFolderNode = {
          name: subFolder.name,
          files: [TRUNCATION_INDICATOR], // Generic truncation.
          subFolders: [],
          hasMoreFiles: true,
        };
        reducedFolder.subFolders.push(truncatedSubfolderNode);
        reducedFolder.hasMoreSubfolders = true;
        break;
      }
      if (subFolder.isIgnored) {
        // Ignored folder: show its name with a marker; never queue it.
        reducedFolder.subFolders.push({
          name: subFolder.name,
          files: [TRUNCATION_INDICATOR], // Indicate contents ignored/truncated.
          subFolders: [],
          hasMoreFiles: true,
        });
      } else {
        // Within budget: create the reduced node and queue it for BFS.
        const reducedSubFolder: ReducedFolderNode = { name: subFolder.name, files: [], subFolders: [] };
        reducedFolder.subFolders.push(reducedSubFolder);
        queue.push({ original: subFolder, reduced: reducedSubFolder });
      }
    }
  }
  return rootReducedNode;
}
/**
 * Counts the files and subfolder nodes present in the reduced tree.
 * A placeholder node (a lone truncation indicator with no subfolders)
 * counts as one item but its contents are not recursed into.
 */
function countReducedItems(node: ReducedFolderNode): number {
  // Each file entry (including a '...' marker) and each subfolder node is one item.
  let total = node.files.length + node.subFolders.length;
  for (const child of node.subFolders) {
    const isPlaceholder =
      child.subFolders.length === 0 &&
      child.files.length === 1 &&
      child.files[0] === TRUNCATION_INDICATOR;
    // Recurse only into real folders; placeholders have nothing countable inside.
    if (!isPlaceholder) {
      total += countReducedItems(child);
    }
  }
  return total;
}
/**
 * Renders the reduced folder structure as tree-drawing lines.
 * The root node's own name is omitted; only its contents are emitted.
 * @param node The current node in the reduced structure.
 * @param indent Indentation accumulated from ancestor levels.
 * @param isLast Whether this node is the last sibling at its level.
 * @param builder Accumulator for the output lines.
 */
function formatReducedStructure(
  node: ReducedFolderNode,
  indent: string,
  isLast: boolean,
  builder: string[]
): void {
  if (!node.isRoot) {
    const connector = isLast ? "└───" : "├───";
    builder.push(`${indent}${connector}${node.name}/`);
  }
  // Continue the vertical rule only when more siblings follow below.
  const childIndent = indent + (isLast || node.isRoot ? " " : "│ ");
  node.files.forEach((file, idx) => {
    const lastEntry = idx === node.files.length - 1 && node.subFolders.length === 0;
    builder.push(`${childIndent}${lastEntry ? "└───" : "├───"}${file}`);
  });
  node.subFolders.forEach((sub, idx) => {
    formatReducedStructure(sub, childIndent, idx === node.subFolders.length - 1, builder);
  });
}
// --- Main Exported Function ---
/**
 * Produces a capped, tree-formatted listing of a directory's contents.
 * Ignored folders appear by name followed by '...' instead of their contents,
 * and a summary header reports how many items are shown versus discovered.
 *
 * @param directory The absolute or relative path to the directory.
 * @param options Optional configuration settings.
 * @returns A promise resolving to the formatted folder structure string,
 *          or a human-readable error string on failure (never rejects).
 */
export async function getFolderStructure(
  directory: string,
  options?: FolderStructureOptions
): Promise<string> {
  const resolvedPath = path.resolve(directory);
  // Apply defaults for any options the caller omitted.
  const mergedOptions: MergedFolderStructureOptions = {
    maxItems: options?.maxItems ?? MAX_ITEMS,
    ignoredFolders: options?.ignoredFolders ?? DEFAULT_IGNORED_FOLDERS,
    fileIncludePattern: options?.fileIncludePattern,
  };
  try {
    // Read the complete structure (ignored folders present but unexplored).
    const fullInfo = await readFullStructure(resolvedPath, mergedOptions);
    if (fullInfo === null) {
      return `Error: Could not read directory "${resolvedPath}". Check path and permissions.`;
    }
    // Truncate breadth-first to the item budget, then count what survived.
    const reducedRoot = reduceStructure(fullInfo, mergedOptions.maxItems, mergedOptions.ignoredFolders);
    const reducedItemCount = countReducedItems(reducedRoot);
    // Render the tree lines.
    const structureLines: string[] = [];
    formatReducedStructure(reducedRoot, "", true, structureLines);
    // Assemble summary + tree, normalizing path separators for display.
    const displayPath = resolvedPath.replace(/\\/g, '/');
    const totalOriginalChildren = fullInfo.totalChildren;
    const wasTruncated = reducedItemCount < totalOriginalChildren || fullInfo.subFolders.some(sf => sf.isIgnored);
    const disclaimer = wasTruncated
      ? `Folders or files indicated with ${TRUNCATION_INDICATOR} contain more items not shown or were ignored.`
      : "";
    const summary = `Showing ${reducedItemCount} of ${totalOriginalChildren} items (files + folders). ${disclaimer}`.trim();
    return `${summary}\n\n${displayPath}/\n${structureLines.join('\n')}`;
  } catch (error: any) {
    console.error(`Error getting folder structure for ${resolvedPath}:`, error);
    return `Error processing directory "${resolvedPath}": ${error.message}`;
  }
}

View File

@ -0,0 +1,102 @@
import process from 'node:process';
import path from 'node:path'; // Import the 'path' module
/**
 * Resolves the directory the CLI should operate in: the explicit argument
 * when provided (and non-empty), otherwise the current working directory.
 */
export function getTargetDirectory(targetDirArg: string | undefined): string {
  if (targetDirArg) {
    return targetDirArg;
  }
  return process.cwd();
}
/**
 * Shortens a path string if it exceeds maxLen, keeping the root plus first
 * segment and as many trailing segments as fit around an ellipsis.
 * Example: /path/to/a/very/long/file.txt -> /path/.../long/file.txt
 * Falls back to plain middle truncation when segment-based shortening is not
 * possible or still too long.
 */
export function shortenPath(filePath: string, maxLen: number = 35): string {
  if (filePath.length <= maxLen) {
    return filePath;
  }
  // Fallback: keep the beginning and end of the raw string around '...'.
  const truncateMiddle = (): string => {
    const keepLen = Math.floor((maxLen - 3) / 2);
    if (keepLen <= 0) {
      // maxLen too small to keep anything meaningful on both sides.
      return filePath.substring(0, maxLen - 3) + '...';
    }
    return `${filePath.substring(0, keepLen)}...${filePath.substring(filePath.length - keepLen)}`;
  };
  const { root } = path.parse(filePath);
  const separator = path.sep;
  // Path segments after the root, with empty pieces dropped.
  const segments = filePath.substring(root.length).split(separator).filter(Boolean);
  // Nothing (or only one thing) after the root: segment shortening is pointless.
  if (segments.length <= 1) {
    return truncateMiddle();
  }
  const head = root + segments[0];
  const tail: string[] = [];
  // Budget already consumed by: head + separator + '...'.
  let usedLength = head.length + separator.length + 3;
  // Greedily take trailing segments (never the first) while they still fit.
  for (let i = segments.length - 1; i >= 1; i--) {
    const candidate = usedLength + separator.length + segments[i].length;
    if (candidate > maxLen) {
      break;
    }
    tail.unshift(segments[i]);
    usedLength = candidate;
  }
  let result = head + separator + '...';
  if (tail.length > 0) {
    result += separator + tail.join(separator);
  }
  // Even "head/..." may exceed maxLen; fall back to raw truncation then.
  return result.length > maxLen ? truncateMiddle() : result;
}
/**
 * Computes the path of targetPath relative to rootDirectory.
 * Both inputs are resolved to absolute paths first. Identical paths yield '.'.
 *
 * @param targetPath The absolute or relative path to make relative.
 * @param rootDirectory The absolute path of the directory to make the target path relative to.
 * @returns The relative path from rootDirectory to targetPath.
 */
export function makeRelative(targetPath: string, rootDirectory: string): string {
  const rel = path.relative(path.resolve(rootDirectory), path.resolve(targetPath));
  // path.relative returns '' when the paths coincide; normalize that to '.'.
  return rel === '' ? '.' : rel;
}

View File

@ -0,0 +1,49 @@
/**
 * Simple utility to validate objects against JSON Schemas.
 * Supports only `required` and shallow `properties[*].type` checks.
 * In a real implementation, you would use a library like Ajv.
 */
export class SchemaValidator {
  /**
   * Validates data against a JSON schema.
   *
   * Fixes over the naive version: JSON Schema's "integer" type is honored
   * (typeof yields 'number', which never matched before), null values are
   * typed as 'null' rather than 'object', and non-object/null data no longer
   * throws when `required` is present — it simply fails validation.
   *
   * @param schema JSON Schema to validate against
   * @param data Data to validate
   * @returns True if valid, false otherwise
   */
  static validate(schema: Record<string, unknown>, data: unknown): boolean {
    // Only object-shaped data can carry properties; anything else is treated
    // as having none (so `required` fails and `properties` checks are skipped).
    const dataObj = (typeof data === 'object' && data !== null && !Array.isArray(data))
      ? data as Record<string, unknown>
      : undefined;
    // Check for required fields.
    if (schema.required && Array.isArray(schema.required)) {
      const required = schema.required as string[];
      for (const field of required) {
        if (dataObj === undefined || dataObj[field] === undefined) {
          console.error(`Missing required field: ${field}`);
          return false;
        }
      }
    }
    // Check property types if properties are defined.
    if (schema.properties && typeof schema.properties === 'object' && dataObj !== undefined) {
      const properties = schema.properties as Record<string, { type?: string }>;
      for (const [key, prop] of Object.entries(properties)) {
        const value = dataObj[key];
        if (value !== undefined && prop.type) {
          const expectedType = prop.type;
          // JSON-Schema-style type name for the actual value.
          const actualType = Array.isArray(value) ? 'array' : value === null ? 'null' : typeof value;
          // 'integer' is a JSON Schema refinement of 'number'.
          const matches = expectedType === 'integer'
            ? typeof value === 'number' && Number.isInteger(value)
            : expectedType === actualType;
          if (!matches) {
            console.error(`Type mismatch for property "${key}": expected ${expectedType}, got ${actualType}`);
            return false;
          }
        }
      }
    }
    return true;
  }
}

View File

@ -0,0 +1,22 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src",
"jsx": "react",
"lib": [
"DOM",
"DOM.Iterable",
"ES2020"
],
"module": "Node16",
"target": "ES2020"
},
"exclude": [
"node_modules",
"dist"
],
"include": [
"src"
]
}

10
tsconfig.json Normal file
View File

@ -0,0 +1,10 @@
{
"compilerOptions": {
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"sourceMap": true
}
}