Remove redundant else branches (#86)
Else branches are an anti-pattern, especially when you can simply return from the previous branch. Over time, else branches cause deep nesting and make code unreadable and unmaintainable. Remove else branches where possible.
This commit is contained in:
parent
dea0782c89
commit
53a5728009
|
@ -54,8 +54,10 @@ process.on('unhandledRejection', (reason, _promise) => {
|
|||
);
|
||||
console.warn('-----------------------------------------');
|
||||
console.warn('Reason:', reason);
|
||||
// No process.exit(1);
|
||||
} else {
|
||||
return;
|
||||
// No process.exit(1); Don't exit.
|
||||
}
|
||||
|
||||
// Log other unexpected unhandled rejections as critical errors
|
||||
console.error('=========================================');
|
||||
console.error('CRITICAL: Unhandled Promise Rejection!');
|
||||
|
@ -67,7 +69,6 @@ process.on('unhandledRejection', (reason, _promise) => {
|
|||
}
|
||||
// Exit for genuinely unhandled errors
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
// --- Global Entry Point ---
|
||||
|
|
|
@ -70,12 +70,12 @@ export function usePipedInput(): PipedInputState {
|
|||
stdin.removeListener('error', handleError);
|
||||
stdin.removeListener('end', handleEnd);
|
||||
};
|
||||
} else {
|
||||
}
|
||||
|
||||
// No piped input (running interactively)
|
||||
setIsLoading(false);
|
||||
// Optionally set an 'info' state or just let isLoading=false & isPiped=false suffice
|
||||
// setError('No piped input detected.'); // Maybe don't treat this as an 'error'
|
||||
}
|
||||
|
||||
// Intentionally run only once on mount or when stdin theoretically changes
|
||||
}, [stdin, isRawModeSupported, setRawMode /*, exit */]);
|
||||
|
|
|
@ -129,12 +129,11 @@ export class GeminiClient {
|
|||
if (error instanceof Error && error.name === 'AbortError') {
|
||||
console.log('Gemini stream request aborted by user.');
|
||||
throw error;
|
||||
} else {
|
||||
}
|
||||
console.error(`Error during Gemini stream or tool interaction:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async generateJson(
|
||||
contents: Content[],
|
||||
|
|
|
@ -152,14 +152,13 @@ export class Turn {
|
|||
);
|
||||
if (confirmationDetails) {
|
||||
return { ...pendingToolCall, confirmationDetails };
|
||||
} else {
|
||||
}
|
||||
const result = await tool.execute(pendingToolCall.args);
|
||||
return {
|
||||
...pendingToolCall,
|
||||
result,
|
||||
confirmationDetails: undefined,
|
||||
};
|
||||
}
|
||||
} catch (execError: unknown) {
|
||||
return {
|
||||
...pendingToolCall,
|
||||
|
@ -191,7 +190,7 @@ export class Turn {
|
|||
type: GeminiEventType.ToolCallConfirmation,
|
||||
value: serverConfirmationetails,
|
||||
};
|
||||
} else {
|
||||
}
|
||||
const responsePart = this.buildFunctionResponse(outcome);
|
||||
this.fnResponses.push(responsePart);
|
||||
const responseInfo: ToolCallResponseInfo = {
|
||||
|
@ -201,7 +200,7 @@ export class Turn {
|
|||
error: outcome.error,
|
||||
};
|
||||
yield { type: GeminiEventType.ToolCallResponse, value: responseInfo };
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -225,23 +224,23 @@ export class Turn {
|
|||
// Builds the Part array expected by the Google GenAI API
|
||||
private buildFunctionResponse(outcome: ServerToolExecutionOutcome): Part {
|
||||
const { name, result, error } = outcome;
|
||||
let fnResponsePayload: Record<string, unknown>;
|
||||
|
||||
if (error) {
|
||||
// Format error for the LLM
|
||||
const errorMessage = error?.message || String(error);
|
||||
fnResponsePayload = { error: `Tool execution failed: ${errorMessage}` };
|
||||
console.error(`[Server Turn] Error executing tool ${name}:`, error);
|
||||
} else {
|
||||
// Pass successful tool result (content meant for LLM)
|
||||
fnResponsePayload = { output: result?.llmContent ?? '' }; // Default to empty string if no content
|
||||
}
|
||||
|
||||
return {
|
||||
functionResponse: {
|
||||
name,
|
||||
id: outcome.callId,
|
||||
response: fnResponsePayload,
|
||||
response: { error: `Tool execution failed: ${errorMessage}` },
|
||||
},
|
||||
};
|
||||
}
|
||||
return {
|
||||
functionResponse: {
|
||||
name,
|
||||
id: outcome.callId,
|
||||
response: { output: result?.llmContent ?? '' },
|
||||
},
|
||||
};
|
||||
}
|
||||
|
|
|
@ -299,23 +299,23 @@ export class BackgroundTerminalAnalyzer {
|
|||
const { stdout } = await execAsync(command);
|
||||
// Check if the output contains the process information (it will have the image name if found)
|
||||
return stdout.toLowerCase().includes('.exe'); // A simple check, adjust if needed
|
||||
} else {
|
||||
}
|
||||
// Linux/macOS/Unix-like: Use kill -0 signal
|
||||
// process.kill sends signal 0 to check existence without killing
|
||||
process.kill(pid, 0);
|
||||
return true; // If no error is thrown, process exists
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
if (isNodeError(error) && error.code === 'ESRCH') {
|
||||
// ESRCH: Standard error code for "No such process" on Unix-like systems
|
||||
return false;
|
||||
} else if (
|
||||
}
|
||||
if (
|
||||
process.platform === 'win32' &&
|
||||
getErrorMessage(error).includes('No tasks are running')
|
||||
) {
|
||||
// tasklist specific error when PID doesn't exist
|
||||
return false;
|
||||
} else {
|
||||
}
|
||||
// Other errors (e.g., EPERM - permission denied) mean we couldn't determine status.
|
||||
// Re-throwing might be appropriate depending on desired behavior.
|
||||
// Here, we log it and cautiously return true, assuming it *might* still be running.
|
||||
|
@ -326,7 +326,6 @@ export class BackgroundTerminalAnalyzer {
|
|||
return true; // Cautious assumption
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- LLM Analysis Method (largely unchanged but added validation robustness) ---
|
||||
private async performLlmAnalysis(
|
||||
|
|
Loading…
Reference in New Issue