diff --git a/.vscode/launch.json b/.vscode/launch.json
index 8e8e56b6..5a7c6522 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -42,6 +42,46 @@
       "console": "integratedTerminal",
       "internalConsoleOptions": "neverOpen",
       "skipFiles": ["<node_internals>/**"]
+    },
+    {
+      "type": "node",
+      "request": "launch",
+      "name": "Debug Server Test: read-many-files",
+      "runtimeExecutable": "npm",
+      "runtimeArgs": [
+        "run",
+        "test",
+        "-w",
+        "packages/server",
+        "--",
+        "--inspect-brk=9229",
+        "--no-file-parallelism",
+        "${workspaceFolder}/packages/server/src/tools/read-many-files.test.ts"
+      ],
+      "cwd": "${workspaceFolder}",
+      "console": "integratedTerminal",
+      "internalConsoleOptions": "neverOpen",
+      "skipFiles": ["<node_internals>/**"]
+    },
+    {
+      "type": "node",
+      "request": "launch",
+      "name": "Debug CLI Test: atCommandProcessor",
+      "runtimeExecutable": "npm",
+      "runtimeArgs": [
+        "run",
+        "test",
+        "-w",
+        "packages/cli",
+        "--",
+        "--inspect-brk=9229",
+        "--no-file-parallelism",
+        "${workspaceFolder}/packages/cli/src/ui/hooks/atCommandProcessor.test.ts"
+      ],
+      "cwd": "${workspaceFolder}",
+      "console": "integratedTerminal",
+      "internalConsoleOptions": "neverOpen",
+      "skipFiles": ["<node_internals>/**"]
     }
   ]
 }
diff --git a/package-lock.json b/package-lock.json
index 75a4d84e..7bc3354e 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -11,6 +11,7 @@
         "packages/*"
       ],
       "devDependencies": {
+        "@types/mime-types": "^2.1.4",
         "esbuild": "^0.25.4",
         "eslint": "^9.24.0",
         "eslint-config-prettier": "^10.1.2",
@@ -1330,6 +1331,13 @@
       "dev": true,
       "license": "MIT"
     },
+    "node_modules/@types/mime-types": {
+      "version": "2.1.4",
+      "resolved": "https://registry.npmjs.org/@types/mime-types/-/mime-types-2.1.4.tgz",
+      "integrity": "sha512-lfU4b34HOri+kAY5UheuFMWPDOI+OPceBSHZKp69gEyTL/mmJ4cnU6Y/rlme3UL3GyOn6Y42hyIEw0/q8sWx5w==",
+      "dev": true,
+      "license": "MIT"
+    },
     "node_modules/@types/node": {
       "version": "20.17.30",
       "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.30.tgz",
@@ -8290,6 +8298,7 @@
         "ink-spinner": "^5.0.0",
         "ink-text-input": "^6.0.0",
         "lowlight": "^3.3.0",
+        "mime-types": "^2.1.4",
         "open": "^10.1.2",
         "react": "^18.3.1",
         "read-package-up": "^11.0.0",
@@ -8319,6 +8328,27 @@
         "node": ">=18"
       }
     },
+    "packages/cli/node_modules/mime-db": {
+      "version": "1.52.0",
+      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+      "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "packages/cli/node_modules/mime-types": {
+      "version": "2.1.35",
+      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+      "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+      "license": "MIT",
+      "dependencies": {
+        "mime-db": "1.52.0"
+      },
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
     "packages/server": {
       "name": "@gemini-code/server",
       "version": "0.1.0",
diff --git a/package.json b/package.json
index 8cbba2e2..a25087e8 100644
--- a/package.json
+++ b/package.json
@@ -27,6 +27,7 @@
   },
   "devDependencies": {
     "esbuild": "^0.25.4",
+    "@types/mime-types": "^2.1.4",
     "eslint": "^9.24.0",
     "eslint-config-prettier": "^10.1.2",
     "eslint-plugin-import": "^2.31.0",
diff --git a/packages/cli/package.json b/packages/cli/package.json
index b421ad8f..33bb0780 100644
--- a/packages/cli/package.json
+++ b/packages/cli/package.json
@@ -41,6 +41,7 @@
     "ink-text-input": "^6.0.0",
     "lowlight": "^3.3.0",
     "open": "^10.1.2",
+    "mime-types": "^2.1.4",
     "react": "^18.3.1",
"read-package-up": "^11.0.0", "shell-quote": "^1.8.2", diff --git a/packages/cli/src/ui/hooks/atCommandProcessor.test.ts b/packages/cli/src/ui/hooks/atCommandProcessor.test.ts new file mode 100644 index 00000000..966fd0fa --- /dev/null +++ b/packages/cli/src/ui/hooks/atCommandProcessor.test.ts @@ -0,0 +1,323 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest'; +import { handleAtCommand } from './atCommandProcessor.js'; +import { Config, ToolResult } from '@gemini-code/server'; +import { ToolCallStatus } from '../types.js'; // Adjusted import +import { /* PartListUnion, */ Part } from '@google/genai'; // Removed PartListUnion +import { UseHistoryManagerReturn } from './useHistoryManager.js'; +import * as fsPromises from 'fs/promises'; // Import for mocking stat +import type { Stats } from 'fs'; // Import Stats type for mocking + +// Mock Config and ToolRegistry +const mockGetToolRegistry = vi.fn(); +const mockGetTargetDir = vi.fn(); +const mockConfig = { + getToolRegistry: mockGetToolRegistry, + getTargetDir: mockGetTargetDir, +} as unknown as Config; + +// Mock read_many_files tool +const mockReadManyFilesExecute = vi.fn(); +const mockReadManyFilesTool = { + name: 'read_many_files', + displayName: 'Read Many Files', + description: 'Reads multiple files.', + execute: mockReadManyFilesExecute, + getDescription: vi.fn((params) => `Read files: ${params.paths.join(', ')}`), +}; + +// Mock addItem from useHistoryManager +const mockAddItem: Mock = vi.fn(); +const mockOnDebugMessage: Mock<(message: string) => void> = vi.fn(); + +vi.mock('fs/promises', async () => { + const actual = await vi.importActual('fs/promises'); + return { + ...actual, + stat: vi.fn(), // Mock stat here + }; +}); + +describe('handleAtCommand', () => { + let abortController: AbortController; + + beforeEach(() => { + vi.resetAllMocks(); + abortController = new AbortController(); + mockGetTargetDir.mockReturnValue('/test/dir'); + mockGetToolRegistry.mockReturnValue({ + getTool: vi.fn((toolName: string) => { + if (toolName === 'read_many_files') { + return mockReadManyFilesTool; + } + return undefined; + }), + }); + // Default mock for fs.stat if not overridden by a specific test + vi.mocked(fsPromises.stat).mockResolvedValue({ + isDirectory: () => false, + } as unknown as Stats); + }); + + afterEach(() => { + abortController.abort(); // Ensure any pending operations are cancelled + }); + + it('should pass through query if no @ command is present', async () => { + const query = 'regular user query'; + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 123, + signal: abortController.signal, + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: query }, + 123, + ); + expect(result.processedQuery).toEqual([{ text: query }]); + expect(result.shouldProceed).toBe(true); + expect(mockReadManyFilesExecute).not.toHaveBeenCalled(); + }); + + it('should pass through query if only a lone @ symbol is present', async () => { + const queryWithSpaces = ' @ '; + // const trimmedQuery = queryWithSpaces.trim(); // Not needed for addItem expectation here + const result = await handleAtCommand({ + query: queryWithSpaces, // Pass the version with spaces to the function + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 124, + signal: abortController.signal, + }); + + 
// For a lone '@', addItem is called with the *original untrimmed* query + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: queryWithSpaces }, + 124, + ); + // processedQuery should also be the original untrimmed version for lone @ + expect(result.processedQuery).toEqual([{ text: queryWithSpaces }]); + expect(result.shouldProceed).toBe(true); + expect(mockOnDebugMessage).toHaveBeenCalledWith( + 'Lone @ detected, passing directly to LLM.', + ); + }); + + it('should process a valid text file path', async () => { + const filePath = 'path/to/file.txt'; + const query = `@${filePath}`; + const fileContent = 'This is the file content.'; + mockReadManyFilesExecute.mockResolvedValue({ + llmContent: fileContent, + returnDisplay: 'Read 1 file.', + } as ToolResult); + // fs.stat will use the default mock (isDirectory: false) + + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 125, + signal: abortController.signal, + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: query }, + 125, + ); + expect(mockReadManyFilesExecute).toHaveBeenCalledWith( + { paths: [filePath] }, + abortController.signal, + ); + expect(mockAddItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'tool_group', + tools: expect.arrayContaining([ + expect.objectContaining({ + name: 'Read Many Files', + status: ToolCallStatus.Success, + resultDisplay: 'Read 1 file.', + }), + ]), + }), + 125, + ); + expect(result.processedQuery).toEqual([ + '\n--- Content from: ${contentLabel} ---\n', + fileContent, + '\n--- End of content ---\n', + ]); + expect(result.shouldProceed).toBe(true); + }); + + it('should process a valid directory path and convert to glob', async () => { + const dirPath = 'path/to/dir'; + const query = `@${dirPath}`; + const dirContent = [ + 'Content of file 1.', + 'Content of file 2.', + 'Content of file 3.', + ]; + vi.mocked(fsPromises.stat).mockResolvedValue({ + isDirectory: () => true, + } as unknown as Stats); + + mockReadManyFilesExecute.mockResolvedValue({ + llmContent: dirContent, + returnDisplay: 'Read directory contents.', + } as ToolResult); + + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 126, + signal: abortController.signal, + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: query }, + 126, + ); + expect(mockReadManyFilesExecute).toHaveBeenCalledWith( + { paths: [`${dirPath}/**`] }, // Expect glob pattern + abortController.signal, + ); + expect(mockOnDebugMessage).toHaveBeenCalledWith( + `Path resolved to directory, using glob: ${dirPath}/**`, + ); + expect(mockAddItem).toHaveBeenCalledWith( + expect.objectContaining({ type: 'tool_group' }), + 126, + ); + expect(result.processedQuery).toEqual([ + '\n--- Content from: ${contentLabel} ---\n', + ...dirContent, + '\n--- End of content ---\n', + ]); + expect(result.shouldProceed).toBe(true); + }); + + it('should process a valid image file path', async () => { + const imagePath = 'path/to/image.png'; + const query = `@${imagePath}`; + const imageData: Part = { + inlineData: { mimeType: 'image/png', data: 'base64imagedata' }, + }; + mockReadManyFilesExecute.mockResolvedValue({ + llmContent: [imageData], + returnDisplay: 'Read 1 image.', + } as ToolResult); + // fs.stat will use the default mock (isDirectory: false) + + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: 
mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 127, + signal: abortController.signal, + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: query }, + 127, + ); + expect(mockReadManyFilesExecute).toHaveBeenCalledWith( + { paths: [imagePath] }, + abortController.signal, + ); + expect(mockAddItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'tool_group', + tools: expect.arrayContaining([ + expect.objectContaining({ + name: 'Read Many Files', + status: ToolCallStatus.Success, + resultDisplay: 'Read 1 image.', + }), + ]), + }), + 127, + ); + expect(result.processedQuery).toEqual([ + '\n--- Content from: ${contentLabel} ---\n', + imageData, + '\n--- End of content ---\n', + ]); + expect(result.shouldProceed).toBe(true); + }); + + it('should handle query with text before and after @command', async () => { + const textBefore = 'Explain this:'; + const filePath = 'doc.md'; + const textAfter = 'in detail.'; + const query = `${textBefore} @${filePath} ${textAfter}`; + const fileContent = 'Markdown content.'; + mockReadManyFilesExecute.mockResolvedValue({ + llmContent: fileContent, + returnDisplay: 'Read 1 doc.', + } as ToolResult); + // fs.stat will use the default mock (isDirectory: false) + + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 128, + signal: abortController.signal, + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: query }, // Expect original query for addItem + 128, + ); + expect(result.processedQuery).toEqual([ + { text: textBefore }, + '\n--- Content from: ${contentLabel} ---\n', + fileContent, + '\n--- End of content ---\n', + { text: textAfter }, + ]); + expect(result.shouldProceed).toBe(true); + }); + + it('should correctly unescape paths with escaped spaces', async () => { + const rawPath = 'path/to/my\\ file.txt'; + const unescapedPath = 'path/to/my file.txt'; + const query = `@${rawPath}`; + const fileContent = 'Content of file with space.'; + mockReadManyFilesExecute.mockResolvedValue({ + llmContent: fileContent, + returnDisplay: 'Read 1 file.', + } as ToolResult); + // fs.stat will use the default mock (isDirectory: false) + + await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 129, + signal: abortController.signal, + }); + + expect(mockReadManyFilesExecute).toHaveBeenCalledWith( + { paths: [unescapedPath] }, // Expect unescaped path + abortController.signal, + ); + }); +}); diff --git a/packages/cli/src/ui/hooks/atCommandProcessor.ts b/packages/cli/src/ui/hooks/atCommandProcessor.ts index e2934840..a5b602ad 100644 --- a/packages/cli/src/ui/hooks/atCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/atCommandProcessor.ts @@ -6,7 +6,7 @@ import * as fs from 'fs/promises'; import * as path from 'path'; -import { PartListUnion } from '@google/genai'; +import { PartListUnion, PartUnion } from '@google/genai'; import { Config, getErrorMessage, @@ -126,6 +126,7 @@ export async function handleAtCommand({ } const contentLabel = pathPart; + const toolRegistry = config.getToolRegistry(); const readManyFilesTool = toolRegistry.getTool('read_many_files'); @@ -168,7 +169,6 @@ export async function handleAtCommand({ try { const result = await readManyFilesTool.execute(toolArgs, signal); - const fileContent = result.llmContent || ''; toolCallDisplay = { callId: `client-read-${userMessageTimestamp}`, @@ -180,13 +180,22 @@ export async 
function handleAtCommand({ }; // Prepare the query parts for the LLM - const processedQueryParts = []; + const processedQueryParts: PartUnion[] = []; if (textBefore) { processedQueryParts.push({ text: textBefore }); } - processedQueryParts.push({ - text: `\n--- Content from: ${contentLabel} ---\n${fileContent}\n--- End Content ---`, - }); + + // Process the result from the tool + processedQueryParts.push('\n--- Content from: ${contentLabel} ---\n'); + if (Array.isArray(result.llmContent)) { + for (const part of result.llmContent) { + processedQueryParts.push(part); + } + } else { + processedQueryParts.push(result.llmContent); + } + processedQueryParts.push('\n--- End of content ---\n'); + if (textAfter) { processedQueryParts.push({ text: textAfter }); } diff --git a/packages/cli/src/ui/hooks/useGeminiStream.ts b/packages/cli/src/ui/hooks/useGeminiStream.ts index 19cb244d..92b055bb 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.ts +++ b/packages/cli/src/ui/hooks/useGeminiStream.ts @@ -25,6 +25,7 @@ import { ToolResultDisplay, ToolEditConfirmationDetails, ToolExecuteConfirmationDetails, + partListUnionToString, } from '@gemini-code/server'; import { type Chat, type PartListUnion, type Part } from '@google/genai'; import { @@ -280,13 +281,14 @@ export const useGeminiStream = ( ); if (abortControllerRef.current.signal.aborted) { declineToolExecution( - result.llmContent, + partListUnionToString(result.llmContent), ToolCallStatus.Canceled, request, originalConfirmationDetails, ); return; } + const functionResponse: Part = { functionResponse: { name: request.name, diff --git a/packages/server/src/__mocks__/fs/promises.ts b/packages/server/src/__mocks__/fs/promises.ts new file mode 100644 index 00000000..42385911 --- /dev/null +++ b/packages/server/src/__mocks__/fs/promises.ts @@ -0,0 +1,48 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi } from 'vitest'; +import * as actualFsPromises from 'node:fs/promises'; + +const readFileMock = vi.fn(); + +// Export a control object so tests can access and manipulate the mock +export const mockControl = { + mockReadFile: readFileMock, +}; + +// Export all other functions from the actual fs/promises module +export const { + access, + appendFile, + chmod, + chown, + copyFile, + cp, + lchmod, + lchown, + link, + lstat, + mkdir, + open, + opendir, + readdir, + readlink, + realpath, + rename, + rmdir, + rm, + stat, + symlink, + truncate, + unlink, + utimes, + watch, + writeFile, +} = actualFsPromises; + +// Override readFile with our mock +export const readFile = readFileMock; diff --git a/packages/server/src/core/geminiRequest.ts b/packages/server/src/core/geminiRequest.ts new file mode 100644 index 00000000..e85bd51e --- /dev/null +++ b/packages/server/src/core/geminiRequest.ts @@ -0,0 +1,71 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { type PartListUnion, type Part } from '@google/genai'; + +/** + * Represents a request to be sent to the Gemini API. + * For now, it's an alias to PartListUnion as the primary content. + * This can be expanded later to include other request parameters. 
+ */ +export type GeminiCodeRequest = PartListUnion; + +export function partListUnionToString(value: PartListUnion): string { + if (typeof value === 'string') { + return value; + } + + if (Array.isArray(value)) { + return value.map(partListUnionToString).join(''); + } + + // Cast to Part, assuming it might contain project-specific fields + const part = value as Part & { + videoMetadata?: unknown; + thought?: string; + codeExecutionResult?: unknown; + executableCode?: unknown; + }; + + if (part.videoMetadata !== undefined) { + return `[Video Metadata]`; + } + + if (part.thought !== undefined) { + return `[Thought: ${part.thought}]`; + } + + if (part.codeExecutionResult !== undefined) { + return `[Code Execution Result]`; + } + + if (part.executableCode !== undefined) { + return `[Executable Code]`; + } + + // Standard Part fields + if (part.fileData !== undefined) { + return `[File Data]`; + } + + if (part.functionCall !== undefined) { + return `[Function Call: ${part.functionCall.name}]`; + } + + if (part.functionResponse !== undefined) { + return `[Function Response: ${part.functionResponse.name}]`; + } + + if (part.inlineData !== undefined) { + return `<${part.inlineData.mimeType}>`; + } + + if (part.text !== undefined) { + return part.text; + } + + return ''; +} diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 9183c2f9..c90ef2fd 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -11,6 +11,7 @@ export * from './config/config.js'; export * from './core/client.js'; export * from './core/prompts.js'; export * from './core/turn.js'; +export * from './core/geminiRequest.js'; // Potentially export types from turn.ts if needed externally // export { GeminiEventType } from './core/turn.js'; // Example diff --git a/packages/server/src/tools/glob.test.ts b/packages/server/src/tools/glob.test.ts index 3437a66e..d42e5b1c 100644 --- a/packages/server/src/tools/glob.test.ts +++ b/packages/server/src/tools/glob.test.ts @@ -5,6 +5,7 @@ */ import { GlobTool, GlobToolParams } from './glob.js'; +import { partListUnionToString } from '../core/geminiRequest.js'; // import { ToolResult } from './tools.js'; // ToolResult is implicitly used by execute import path from 'path'; import fs from 'fs/promises'; @@ -134,9 +135,14 @@ describe('GlobTool', () => { it('should correctly sort files by modification time (newest first)', async () => { const params: GlobToolParams = { pattern: '*.sortme' }; const result = await globTool.execute(params, abortSignal); - expect(result.llmContent).toContain('Found 2 file(s)'); - const filesListed = result.llmContent - .substring(result.llmContent.indexOf(':') + 1) + const llmContent = partListUnionToString(result.llmContent); + + expect(llmContent).toContain('Found 2 file(s)'); + // Ensure llmContent is a string for TypeScript type checking + expect(typeof llmContent).toBe('string'); + + const filesListed = llmContent + .substring(llmContent.indexOf(':') + 1) .trim() .split('\n'); expect(filesListed[0]).toContain(path.join(tempRootDir, 'newer.sortme')); diff --git a/packages/server/src/tools/read-many-files.test.ts b/packages/server/src/tools/read-many-files.test.ts new file mode 100644 index 00000000..50156b55 --- /dev/null +++ b/packages/server/src/tools/read-many-files.test.ts @@ -0,0 +1,332 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi } from 'vitest'; +import type { Mock } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from 
'vitest'; +import { mockControl } from '../__mocks__/fs/promises.js'; +import { ReadManyFilesTool } from './read-many-files.js'; +import path from 'path'; +import fs from 'fs'; // Actual fs for setup +import os from 'os'; + +describe('ReadManyFilesTool', () => { + let tool: ReadManyFilesTool; + let tempRootDir: string; + let tempDirOutsideRoot: string; + let mockReadFileFn: Mock; + + beforeEach(async () => { + tempRootDir = fs.mkdtempSync( + path.join(os.tmpdir(), 'read-many-files-root-'), + ); + tempDirOutsideRoot = fs.mkdtempSync( + path.join(os.tmpdir(), 'read-many-files-external-'), + ); + tool = new ReadManyFilesTool(tempRootDir); + + mockReadFileFn = mockControl.mockReadFile; + mockReadFileFn.mockReset(); + + mockReadFileFn.mockImplementation( + async (filePath: fs.PathLike, options?: Record) => { + const fp = + typeof filePath === 'string' + ? filePath + : (filePath as Buffer).toString(); + + if (fs.existsSync(fp)) { + const originalFs = await vi.importActual('fs'); + return originalFs.promises.readFile(fp, options); + } + + if (fp.endsWith('nonexistent-file.txt')) { + const err = new Error( + `ENOENT: no such file or directory, open '${fp}'`, + ); + (err as NodeJS.ErrnoException).code = 'ENOENT'; + throw err; + } + if (fp.endsWith('unreadable.txt')) { + const err = new Error(`EACCES: permission denied, open '${fp}'`); + (err as NodeJS.ErrnoException).code = 'EACCES'; + throw err; + } + if (fp.endsWith('.png')) + return Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]); // PNG header + if (fp.endsWith('.pdf')) return Buffer.from('%PDF-1.4...'); // PDF start + if (fp.endsWith('binary.bin')) + return Buffer.from([0x00, 0x01, 0x02, 0x00, 0x03]); + + const err = new Error( + `ENOENT: no such file or directory, open '${fp}' (unmocked path)`, + ); + (err as NodeJS.ErrnoException).code = 'ENOENT'; + throw err; + }, + ); + }); + + afterEach(() => { + if (fs.existsSync(tempRootDir)) { + fs.rmSync(tempRootDir, { recursive: true, force: true }); + } + if (fs.existsSync(tempDirOutsideRoot)) { + fs.rmSync(tempDirOutsideRoot, { recursive: true, force: true }); + } + }); + + describe('validateParams', () => { + it('should return null for valid relative paths within root', () => { + const params = { paths: ['file1.txt', 'subdir/file2.txt'] }; + expect(tool.validateParams(params)).toBeNull(); + }); + + it('should return null for valid glob patterns within root', () => { + const params = { paths: ['*.txt', 'subdir/**/*.js'] }; + expect(tool.validateParams(params)).toBeNull(); + }); + + it('should return null for paths trying to escape the root (e.g., ../) as execute handles this', () => { + const params = { paths: ['../outside.txt'] }; + expect(tool.validateParams(params)).toBeNull(); + }); + + it('should return null for absolute paths as execute handles this', () => { + const params = { paths: [path.join(tempDirOutsideRoot, 'absolute.txt')] }; + expect(tool.validateParams(params)).toBeNull(); + }); + + it('should return error if paths array is empty', () => { + const params = { paths: [] }; + expect(tool.validateParams(params)).toBe( + 'The "paths" parameter is required and must be a non-empty array of strings/glob patterns.', + ); + }); + + it('should return null for valid exclude and include patterns', () => { + const params = { + paths: ['src/**/*.ts'], + exclude: ['**/*.test.ts'], + include: ['src/utils/*.ts'], + }; + expect(tool.validateParams(params)).toBeNull(); + }); + }); + + describe('execute', () => { + const createFile = (filePath: string, content = '') => { + const fullPath 
= path.join(tempRootDir, filePath); + fs.mkdirSync(path.dirname(fullPath), { recursive: true }); + fs.writeFileSync(fullPath, content); + }; + const createBinaryFile = (filePath: string, data: Uint8Array) => { + const fullPath = path.join(tempRootDir, filePath); + fs.mkdirSync(path.dirname(fullPath), { recursive: true }); + fs.writeFileSync(fullPath, data); + }; + + it('should read a single specified file', async () => { + createFile('file1.txt', 'Content of file1'); + const params = { paths: ['file1.txt'] }; + const result = await tool.execute(params, new AbortController().signal); + expect(result.llmContent).toEqual([ + '--- file1.txt ---\n\nContent of file1\n\n', + ]); + expect(result.returnDisplay).toContain( + 'Successfully read and concatenated content from **1 file(s)**', + ); + }); + + it('should read multiple specified files', async () => { + createFile('file1.txt', 'Content1'); + createFile('subdir/file2.js', 'Content2'); + const params = { paths: ['file1.txt', 'subdir/file2.js'] }; + const result = await tool.execute(params, new AbortController().signal); + const content = result.llmContent as string[]; + expect( + content.some((c) => c.includes('--- file1.txt ---\n\nContent1\n\n')), + ).toBe(true); + expect( + content.some((c) => + c.includes('--- subdir/file2.js ---\n\nContent2\n\n'), + ), + ).toBe(true); + expect(result.returnDisplay).toContain( + 'Successfully read and concatenated content from **2 file(s)**', + ); + }); + + it('should handle glob patterns', async () => { + createFile('file.txt', 'Text file'); + createFile('another.txt', 'Another text'); + createFile('sub/data.json', '{}'); + const params = { paths: ['*.txt'] }; + const result = await tool.execute(params, new AbortController().signal); + const content = result.llmContent as string[]; + expect( + content.some((c) => c.includes('--- file.txt ---\n\nText file\n\n')), + ).toBe(true); + expect( + content.some((c) => + c.includes('--- another.txt ---\n\nAnother text\n\n'), + ), + ).toBe(true); + expect(content.find((c) => c.includes('sub/data.json'))).toBeUndefined(); + expect(result.returnDisplay).toContain( + 'Successfully read and concatenated content from **2 file(s)**', + ); + }); + + it('should respect exclude patterns', async () => { + createFile('src/main.ts', 'Main content'); + createFile('src/main.test.ts', 'Test content'); + const params = { paths: ['src/**/*.ts'], exclude: ['**/*.test.ts'] }; + const result = await tool.execute(params, new AbortController().signal); + const content = result.llmContent as string[]; + expect(content).toEqual(['--- src/main.ts ---\n\nMain content\n\n']); + expect( + content.find((c) => c.includes('src/main.test.ts')), + ).toBeUndefined(); + expect(result.returnDisplay).toContain( + 'Successfully read and concatenated content from **1 file(s)**', + ); + }); + + it('should handle non-existent specific files gracefully', async () => { + const params = { paths: ['nonexistent-file.txt'] }; + const result = await tool.execute(params, new AbortController().signal); + expect(result.llmContent).toEqual([ + 'No files matching the criteria were found or all were skipped.', + ]); + expect(result.returnDisplay).toContain( + 'No files were read and concatenated based on the criteria.', + ); + }); + + it('should use default excludes', async () => { + createFile('node_modules/some-lib/index.js', 'lib code'); + createFile('src/app.js', 'app code'); + const params = { paths: ['**/*.js'] }; + const result = await tool.execute(params, new AbortController().signal); + const content = 
result.llmContent as string[]; + expect(content).toEqual(['--- src/app.js ---\n\napp code\n\n']); + expect( + content.find((c) => c.includes('node_modules/some-lib/index.js')), + ).toBeUndefined(); + expect(result.returnDisplay).toContain( + 'Successfully read and concatenated content from **1 file(s)**', + ); + }); + + it('should NOT use default excludes if useDefaultExcludes is false', async () => { + createFile('node_modules/some-lib/index.js', 'lib code'); + createFile('src/app.js', 'app code'); + const params = { paths: ['**/*.js'], useDefaultExcludes: false }; + const result = await tool.execute(params, new AbortController().signal); + const content = result.llmContent as string[]; + expect( + content.some((c) => + c.includes('--- node_modules/some-lib/index.js ---\n\nlib code\n\n'), + ), + ).toBe(true); + expect( + content.some((c) => c.includes('--- src/app.js ---\n\napp code\n\n')), + ).toBe(true); + expect(result.returnDisplay).toContain( + 'Successfully read and concatenated content from **2 file(s)**', + ); + }); + + it('should include images as inlineData parts if explicitly requested by extension', async () => { + createBinaryFile( + 'image.png', + Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]), + ); + const params = { paths: ['*.png'] }; // Explicitly requesting .png + const result = await tool.execute(params, new AbortController().signal); + expect(result.llmContent).toEqual([ + { + inlineData: { + data: Buffer.from([ + 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, + ]).toString('base64'), + mimeType: 'image/png', + }, + }, + ]); + expect(result.returnDisplay).toContain( + 'Successfully read and concatenated content from **1 file(s)**', + ); + }); + + it('should include images as inlineData parts if explicitly requested by name', async () => { + createBinaryFile( + 'myExactImage.png', + Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]), + ); + const params = { paths: ['myExactImage.png'] }; // Explicitly requesting by full name + const result = await tool.execute(params, new AbortController().signal); + expect(result.llmContent).toEqual([ + { + inlineData: { + data: Buffer.from([ + 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, + ]).toString('base64'), + mimeType: 'image/png', + }, + }, + ]); + }); + + it('should skip PDF files if not explicitly requested by extension or name', async () => { + createBinaryFile('document.pdf', Buffer.from('%PDF-1.4...')); + createFile('notes.txt', 'text notes'); + const params = { paths: ['*'] }; // Generic glob, not specific to .pdf + const result = await tool.execute(params, new AbortController().signal); + const content = result.llmContent as string[]; + expect( + content.some( + (c) => typeof c === 'string' && c.includes('--- notes.txt ---'), + ), + ).toBe(true); + expect(result.returnDisplay).toContain( + '**Skipped 1 item(s) (up to 5 shown):**', + ); + expect(result.returnDisplay).toContain( + '- `document.pdf` (Reason: asset file (image/pdf) was not explicitly requested by name or extension)', + ); + }); + + it('should include PDF files as inlineData parts if explicitly requested by extension', async () => { + createBinaryFile('important.pdf', Buffer.from('%PDF-1.4...')); + const params = { paths: ['*.pdf'] }; // Explicitly requesting .pdf files + const result = await tool.execute(params, new AbortController().signal); + expect(result.llmContent).toEqual([ + { + inlineData: { + data: Buffer.from('%PDF-1.4...').toString('base64'), + mimeType: 'application/pdf', + }, + }, + ]); + }); + + it('should include 
PDF files as inlineData parts if explicitly requested by name', async () => { + createBinaryFile('report-final.pdf', Buffer.from('%PDF-1.4...')); + const params = { paths: ['report-final.pdf'] }; + const result = await tool.execute(params, new AbortController().signal); + expect(result.llmContent).toEqual([ + { + inlineData: { + data: Buffer.from('%PDF-1.4...').toString('base64'), + mimeType: 'application/pdf', + }, + }, + ]); + }); + }); +}); diff --git a/packages/server/src/tools/read-many-files.ts b/packages/server/src/tools/read-many-files.ts index 4d9d35e8..5064ae1b 100644 --- a/packages/server/src/tools/read-many-files.ts +++ b/packages/server/src/tools/read-many-files.ts @@ -12,6 +12,8 @@ import * as path from 'path'; import fg from 'fast-glob'; import { GEMINI_MD_FILENAME } from './memoryTool.js'; +import { PartListUnion } from '@google/genai'; +import mime from 'mime-types'; /** * Parameters for the ReadManyFilesTool. */ @@ -82,14 +84,6 @@ const DEFAULT_EXCLUDES: string[] = [ '**/*.bz2', '**/*.rar', '**/*.7z', - '**/*.png', - '**/*.jpg', - '**/*.jpeg', - '**/*.gif', - '**/*.bmp', - '**/*.tiff', - '**/*.ico', - '**/*.pdf', '**/*.doc', '**/*.docx', '**/*.xls', @@ -187,6 +181,13 @@ Default excludes apply to common non-text files and large dependency directories } validateParams(params: ReadManyFilesParams): string | null { + if ( + !params.paths || + !Array.isArray(params.paths) || + params.paths.length === 0 + ) { + return 'The "paths" parameter is required and must be a non-empty array of strings/glob patterns.'; + } if ( this.schema.parameters && !SchemaValidator.validate( @@ -263,7 +264,7 @@ Default excludes apply to common non-text files and large dependency directories const filesToConsider = new Set(); const skippedFiles: Array<{ path: string; reason: string }> = []; const processedFilesRelativePaths: string[] = []; - let concatenatedContent = ''; + const content: PartListUnion = []; const effectiveExcludes = useDefaultExcludes ? 
[...DEFAULT_EXCLUDES, ...exclude] @@ -319,28 +320,63 @@ Default excludes apply to common non-text files and large dependency directories .relative(toolBaseDir, filePath) .replace(/\\/g, '/'); try { - const contentBuffer = await fs.readFile(filePath); - // Basic binary detection: check for null bytes in the first 1KB - const sample = contentBuffer.subarray( - 0, - Math.min(contentBuffer.length, 1024), - ); - if (sample.includes(0)) { - skippedFiles.push({ - path: relativePathForDisplay, - reason: 'Skipped (appears to be binary)', + const mimeType = mime.lookup(filePath); + if ( + mimeType && + (mimeType.startsWith('image/') || mimeType === 'application/pdf') + ) { + const fileExtension = path.extname(filePath); + const fileNameWithoutExtension = path.basename( + filePath, + fileExtension, + ); + const requestedExplicitly = inputPatterns.some( + (pattern: string) => + pattern.toLowerCase().includes(fileExtension) || + pattern.includes(fileNameWithoutExtension), + ); + + if (!requestedExplicitly) { + skippedFiles.push({ + path: relativePathForDisplay, + reason: + 'asset file (image/pdf) was not explicitly requested by name or extension', + }); + continue; + } + const contentBuffer = await fs.readFile(filePath); + const base64Data = contentBuffer.toString('base64'); + content.push({ + inlineData: { + data: base64Data, + mimeType, + }, }); - continue; + processedFilesRelativePaths.push(relativePathForDisplay); + } else { + const contentBuffer = await fs.readFile(filePath); + // Basic binary detection: check for null bytes in the first 1KB + const sample = contentBuffer.subarray( + 0, + Math.min(contentBuffer.length, 1024), + ); + if (sample.includes(0)) { + skippedFiles.push({ + path: relativePathForDisplay, + reason: 'appears to be binary', + }); + continue; + } + // Using default encoding + const fileContent = contentBuffer.toString(DEFAULT_ENCODING); + // Using default separator format + const separator = DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace( + '{filePath}', + relativePathForDisplay, + ); + content.push(`${separator}\n\n${fileContent}\n\n`); + processedFilesRelativePaths.push(relativePathForDisplay); } - // Using default encoding - const fileContent = contentBuffer.toString(DEFAULT_ENCODING); - // Using default separator format - const separator = DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace( - '{filePath}', - relativePathForDisplay, - ); - concatenatedContent += `${separator}\n\n${fileContent}\n\n`; - processedFilesRelativePaths.push(relativePathForDisplay); } catch (error) { skippedFiles.push({ path: relativePathForDisplay, @@ -352,18 +388,24 @@ Default excludes apply to common non-text files and large dependency directories let displayMessage = `### ReadManyFiles Result (Target Dir: \`${this.targetDir}\`)\n\n`; if (processedFilesRelativePaths.length > 0) { displayMessage += `Successfully read and concatenated content from **${processedFilesRelativePaths.length} file(s)**.\n`; - displayMessage += `\n**Processed Files:**\n`; - processedFilesRelativePaths - .slice(0, 10) - .forEach((p) => (displayMessage += `- \`${p}\`\n`)); - if (processedFilesRelativePaths.length > 10) { + if (processedFilesRelativePaths.length <= 10) { + displayMessage += `\n**Processed Files:**\n`; + processedFilesRelativePaths.forEach( + (p) => (displayMessage += `- \`${p}\`\n`), + ); + } else { + displayMessage += `\n**Processed Files (first 10 shown):**\n`; + processedFilesRelativePaths + .slice(0, 10) + .forEach((p) => (displayMessage += `- \`${p}\`\n`)); displayMessage += `- ...and ${processedFilesRelativePaths.length 
- 10} more.\n`; } - } else { - displayMessage += `No files were read and concatenated based on the criteria.\n`; } if (skippedFiles.length > 0) { + if (processedFilesRelativePaths.length === 0) { + displayMessage += `No files were read and concatenated based on the criteria.\n`; + } displayMessage += `\n**Skipped ${skippedFiles.length} item(s) (up to 5 shown):**\n`; skippedFiles .slice(0, 5) @@ -373,18 +415,21 @@ Default excludes apply to common non-text files and large dependency directories if (skippedFiles.length > 5) { displayMessage += `- ...and ${skippedFiles.length - 5} more.\n`; } - } - if ( - concatenatedContent.length === 0 && - processedFilesRelativePaths.length === 0 + } else if ( + processedFilesRelativePaths.length === 0 && + skippedFiles.length === 0 ) { - concatenatedContent = - 'No files matching the criteria were found or all were skipped.'; + displayMessage += `No files were read and concatenated based on the criteria.\n`; } + if (content.length === 0) { + content.push( + 'No files matching the criteria were found or all were skipped.', + ); + } return { - llmContent: concatenatedContent, - returnDisplay: displayMessage, + llmContent: content, + returnDisplay: displayMessage.trim(), }; } } diff --git a/packages/server/src/tools/tools.ts b/packages/server/src/tools/tools.ts index de4bf287..329010bc 100644 --- a/packages/server/src/tools/tools.ts +++ b/packages/server/src/tools/tools.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { FunctionDeclaration, Schema } from '@google/genai'; +import { FunctionDeclaration, PartListUnion, Schema } from '@google/genai'; /** * Interface representing the base Tool functionality @@ -152,7 +152,7 @@ export interface ToolResult { * Content meant to be included in LLM history. * This should represent the factual outcome of the tool execution. */ - llmContent: string; + llmContent: PartListUnion; /** * Markdown string for user display.