Manually fix hooks and utils linting errors (partial)

- More changes are to come; this is intentionally a partial change so as to disrupt as few people as possible.

Part of https://b.corp.google.com/issues/411384603
Taylor Mullen 2025-04-18 17:47:49 -04:00 committed by N. Taylor Mullen
parent dfae3f6284
commit e7fa39112a
5 changed files with 35 additions and 43 deletions

View File

@@ -29,7 +29,7 @@ async function main() {
}
// --- Global Unhandled Rejection Handler ---
process.on('unhandledRejection', (reason, promise) => {
process.on('unhandledRejection', (reason, _) => {
// Check if this is the known 429 ClientError that sometimes escapes
// this is a workaround for a specific issue with the way we are calling gemini
// where a 429 error is thrown but not caught, causing an unhandled rejection
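The body of this handler sits outside the hunk. As a rough sketch of the kind of check the comment describes (an assumption for illustration, not the commit's actual code, which presumably inspects the ClientError type mentioned above rather than the message text):

// Hypothetical sketch only; not the handler body from this commit.
process.on('unhandledRejection', (reason, _) => {
  // Sketch assumption: treat an Error whose message mentions 429 as the known case.
  if (reason instanceof Error && reason.message.includes('429')) {
    console.warn('Ignoring known 429 rejection that escaped normal handling.');
    return;
  }
  // Anything else is unexpected; surface it and fail fast.
  console.error('Unhandled promise rejection:', reason);
  process.exit(1);
});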

View File

@@ -5,6 +5,7 @@ import { type Chat, type PartListUnion } from '@google/genai';
import { HistoryItem } from '../types.js';
import { processGeminiStream, StreamingState } from '../../core/gemini-stream.js';
import { globalConfig } from '../../config/config.js';
import { getErrorMessage, isNodeError } from '../../utils/errors.js';
const addHistoryItem = (
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
@@ -36,9 +37,9 @@ export const useGeminiStream = (
if (!geminiClientRef.current) {
try {
geminiClientRef.current = new GeminiClient(globalConfig);
} catch (error: any) {
} catch (error: unknown) {
setInitError(
`Failed to initialize client: ${error.message || 'Unknown error'}`,
`Failed to initialize client: ${getErrorMessage(error) || 'Unknown error'}`,
);
}
}
@@ -136,17 +137,17 @@ export const useGeminiStream = (
addHistoryItem: addHistoryItemFromStream,
currentToolGroupIdRef,
});
} catch (error: any) {
} catch (error: unknown) {
// (Error handling for stream initiation remains the same)
console.error('Error initiating stream:', error);
if (error.name !== 'AbortError') {
if (!isNodeError(error) || error.name !== 'AbortError') {
// Use historyUpdater's function potentially? Or keep addHistoryItem here?
// Keeping addHistoryItem here for direct errors from this scope.
addHistoryItem(
setHistory,
{
type: 'error',
text: `[Error starting stream: ${error.message}]`,
text: `[Error starting stream: ${getErrorMessage(error)}]`,
},
getNextMessageId(userMessageTimestamp),
);
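The getErrorMessage and isNodeError helpers imported from ../../utils/errors.js are not part of this diff. A minimal sketch of what they plausibly look like, assuming they only need to narrow unknown catch values (not the file's actual contents):

// Hypothetical utils/errors.ts sketch; the real helpers may differ.
export function isNodeError(error: unknown): error is NodeJS.ErrnoException {
  return error instanceof Error && 'code' in error;
}

export function getErrorMessage(error: unknown): string {
  if (error instanceof Error) {
    return error.message;
  }
  try {
    return String(error);
  } catch {
    return 'Unknown error';
  }
}

With signatures like these, catch (error: unknown) narrows cleanly: isNodeError gates access to error.code and error.name, while getErrorMessage replaces the direct error.message reads that the linter flagged.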

View File

@@ -214,7 +214,6 @@ export class MarkdownRenderer {
let codeBlockContent: string[] = [];
let codeBlockLang: string | null = null;
let codeBlockFence = ''; // Store the type of fence used (``` or ~~~)
let inListType: 'ul' | 'ol' | null = null; // Track current list type to group items
lines.forEach((line, index) => {
const key = `line-${index}`;
@@ -241,7 +240,6 @@
codeBlockContent = [];
codeBlockLang = null;
codeBlockFence = '';
inListType = null; // Ensure list context is reset
} else {
// Add line to current code block content
codeBlockContent.push(line);
@@ -261,7 +259,6 @@
inCodeBlock = true;
codeBlockFence = codeFenceMatch[1];
codeBlockLang = codeFenceMatch[2] || null;
inListType = null; // Starting code block breaks list
} else if (hrMatch) {
// Render Horizontal Rule (simple dashed line)
// Use box with height and border character, or just Text with dashes
@@ -270,7 +267,6 @@
<Text dimColor>---</Text>
</Box>,
);
inListType = null; // HR breaks list
} else if (headerMatch) {
const level = headerMatch[1].length;
const headerText = headerMatch[2];
@@ -301,9 +297,11 @@
</Text>
);
break;
default:
headerNode = <Text>{renderedHeaderText}</Text>;
break;
}
if (headerNode) contentBlocks.push(<Box key={key}>{headerNode}</Box>);
inListType = null; // Header breaks list
} else if (ulMatch) {
const marker = ulMatch[1]; // *, -, or +
const itemText = ulMatch[2];
@@ -311,18 +309,14 @@
contentBlocks.push(
MarkdownRenderer._renderListItem(key, itemText, 'ul', marker),
);
inListType = 'ul'; // Set/maintain list context
} else if (olMatch) {
const marker = olMatch[1]; // The number
const itemText = olMatch[2];
contentBlocks.push(
MarkdownRenderer._renderListItem(key, itemText, 'ol', marker),
);
inListType = 'ol'; // Set/maintain list context
} else {
// --- Regular line (Paragraph or Empty line) ---
inListType = null; // Any non-list line breaks the list sequence
// Render line content if it's not blank, applying inline styles
const renderedLine = MarkdownRenderer._renderInline(line);
if (renderedLine.length > 0 || line.length > 0) {
@@ -336,8 +330,6 @@
// Handle specifically empty lines
// Add minimal space for blank lines between paragraphs/blocks
if (contentBlocks.length > 0 && !inCodeBlock) {
// Avoid adding space inside code block state (handled above)
const previousBlock = contentBlocks[contentBlocks.length - 1];
// Avoid adding multiple blank lines consecutively easily - check if previous was also blank?
// For now, add a minimal spacer for any blank line outside code blocks.
contentBlocks.push(<Box key={key} height={1} />);

View File

@@ -1,9 +1,10 @@
import { promises as fs } from 'fs';
import { SchemaUnion, Type } from '@google/genai'; // Assuming these types exist
import { Content, SchemaUnion, Type } from '@google/genai'; // Assuming these types exist
import { GeminiClient } from '../core/gemini-client.js'; // Assuming this path
import { exec } from 'child_process'; // Needed for Windows process check
import { promisify } from 'util'; // To promisify exec
import { globalConfig } from '../config/config.js';
import { getErrorMessage, isNodeError } from './errors.js';
// Promisify child_process.exec for easier async/await usage
const execAsync = promisify(exec);
@@ -11,8 +12,9 @@ const execAsync = promisify(exec);
// Define the expected interface for the AI client dependency
export interface AiClient {
generateJson(
prompt: any[], // Keep flexible or define a stricter prompt structure type
prompt: Content[], // Keep flexible or define a stricter prompt structure type
schema: SchemaUnion,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): Promise<any>; // Ideally, specify the expected JSON structure TAnalysisResult | TAnalysisFailure
}
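As a usage sketch only (the caller name and schema fields below are illustrative assumptions, not from this commit), an AiClient with the typed prompt parameter would be invoked roughly like this:

// Hypothetical caller; schema fields are assumptions for illustration.
async function analyzeOutput(aiClient: AiClient, output: string) {
  const schema: SchemaUnion = {
    type: Type.OBJECT,
    properties: {
      summary: { type: Type.STRING },
      inferredStatus: { type: Type.STRING },
    },
    required: ['summary', 'inferredStatus'],
  };
  const prompt: Content[] = [
    { role: 'user', parts: [{ text: `Analyze this process output:\n${output}` }] },
  ];
  return aiClient.generateJson(prompt, schema);
}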
@@ -98,20 +100,20 @@ export class BackgroundTerminalAnalyzer {
// --- Robust File Reading ---
try {
currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8');
} catch (error: any) {
} catch (error: unknown) {
// If file doesn't exist yet or isn't readable, treat as empty, but log warning
if (error.code !== 'ENOENT') {
if (!isNodeError(error) || error.code !== 'ENOENT') {
console.warn(
`Attempt ${attempts}: Failed to read stdout file ${tempStdoutFilePath}: ${error.message}`,
`Attempt ${attempts}: Failed to read stdout file ${tempStdoutFilePath}: ${getErrorMessage(error)}`,
);
}
}
try {
currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8');
} catch (error: any) {
if (error.code !== 'ENOENT') {
} catch (error: unknown) {
if (!isNodeError(error) || error.code !== 'ENOENT') {
console.warn(
`Attempt ${attempts}: Failed to read stderr file ${tempStderrFilePath}: ${error.message}`,
`Attempt ${attempts}: Failed to read stderr file ${tempStderrFilePath}: ${getErrorMessage(error)}`,
);
}
}
@@ -125,10 +127,10 @@
// Reread files one last time in case output was written just before exit
try {
currentStdout = await fs.readFile(tempStdoutFilePath, 'utf-8');
} catch {}
} catch { /* ignore */ }
try {
currentStderr = await fs.readFile(tempStderrFilePath, 'utf-8');
} catch {}
} catch { /* ignore */ }
lastAnalysisResult = await this.analyzeOutputWithLLM(
currentStdout,
@@ -148,10 +150,10 @@
summary: `Process ended. Final analysis summary: ${lastAnalysisResult.summary}`,
};
}
} catch (procCheckError: any) {
} catch (procCheckError: unknown) {
// Log the error but allow polling to continue, as log analysis might still be useful
console.warn(
`Could not check process status for PID ${pid} on attempt ${attempts}: ${procCheckError.message}`,
`Could not check process status for PID ${pid} on attempt ${attempts}: ${getErrorMessage(procCheckError)}`,
);
// Decide if you want to bail out here or continue analysis based on logs only
// For now, we continue.
@@ -257,13 +259,13 @@
process.kill(pid, 0);
return true; // If no error is thrown, process exists
}
} catch (error: any) {
if (error.code === 'ESRCH') {
} catch (error: unknown) {
if (isNodeError(error) && error.code === 'ESRCH') {
// ESRCH: Standard error code for "No such process" on Unix-like systems
return false;
} else if (
process.platform === 'win32' &&
error.message.includes('No tasks are running')
getErrorMessage(error).includes('No tasks are running')
) {
// tasklist specific error when PID doesn't exist
return false;
@@ -272,7 +274,7 @@
// Re-throwing might be appropriate depending on desired behavior.
// Here, we log it and cautiously return true, assuming it *might* still be running.
console.warn(
`isProcessRunning(${pid}) encountered error: ${error.message}. Assuming process might still exist.`,
`isProcessRunning(${pid}) encountered error: ${getErrorMessage(error)}. Assuming process might still exist.`,
);
// Or you could throw the error: throw new Error(`Failed to check process status for PID ${pid}: ${error.message}`);
return true; // Cautious assumption
@@ -402,16 +404,14 @@ Based *only* on the provided stdout and stderr:
}
return response as AnalysisResult; // Cast after validation
} catch (error: any) {
} catch (error: unknown) {
console.error(
`LLM analysis call failed for command "${command}":`,
error,
);
// Ensure the error message passed back is helpful
const errorMessage =
error instanceof Error ? error.message : String(error);
return {
error: `LLM analysis call encountered an error: ${errorMessage}`,
error: `LLM analysis call encountered an error: ${getErrorMessage(error)}`,
inferredStatus: 'AnalysisFailed',
};
}

View File

@ -1,5 +1,6 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import { getErrorMessage, isNodeError } from './errors.js';
const MAX_ITEMS = 200;
const TRUNCATION_INDICATOR = '...';
@@ -135,8 +136,8 @@
folderInfo.files.length +
folderInfo.subFolders.length +
folderInfo.subFolders.reduce((sum, sf) => sum + sf.totalChildren, 0);
} catch (error: any) {
if (error.code === 'EACCES' || error.code === 'ENOENT') {
} catch (error: unknown) {
if (isNodeError(error) && (error.code === 'EACCES' || error.code === 'ENOENT')) {
console.warn(
`Warning: Could not read directory ${folderPath}: ${error.message}`,
);
@@ -163,7 +164,6 @@
function reduceStructure(
fullInfo: FullFolderInfo,
maxItems: number,
ignoredFolders: Set<string>, // Pass ignoredFolders for checking
): ReducedFolderNode {
const rootReducedNode: ReducedFolderNode = {
name: fullInfo.name,
@@ -348,7 +348,6 @@
const reducedRoot = reduceStructure(
fullInfo,
mergedOptions.maxItems,
mergedOptions.ignoredFolders,
);
// 3. Count items in the *reduced* structure for the summary
@@ -377,8 +376,8 @@
`Showing ${reducedItemCount} of ${totalOriginalChildren} items (files + folders). ${disclaimer}`.trim();
return `${summary}\n\n${displayPath}/\n${structureLines.join('\n')}`;
} catch (error: any) {
} catch (error: unknown) {
console.error(`Error getting folder structure for ${resolvedPath}:`, error);
return `Error processing directory "${resolvedPath}": ${error.message}`;
return `Error processing directory "${resolvedPath}": ${getErrorMessage(error)}`;
}
}