update token usage with types

This commit is contained in:
jerop 2025-06-11 06:56:53 +00:00 committed by Jerop Kipruto
parent 9d992b32e4
commit 7ba2b13870
7 changed files with 120 additions and 15 deletions

View File

@ -338,6 +338,7 @@ These are numerical measurements of behavior over time.
- **Attributes**:
- `model`
- `gemini_cli.token.input.count` (Counter, Int): Counts the total number of input tokens sent to the API.
- `gemini_cli.token.usage` (Counter, Int): Counts the total number of tokens used.
- **Attributes**:
- `model`
- `type` (string: "input", "output", "thought", "cache", or "tool")

View File

@ -216,7 +216,7 @@ export class GeminiClient {
private _logApiRequest(model: string, inputTokenCount: number): void {
logApiRequest({
model,
prompt_token_count: inputTokenCount,
input_token_count: inputTokenCount,
duration_ms: 0, // Duration is not known at request time
});
}
@ -245,6 +245,11 @@ export class GeminiClient {
attempt,
status_code: undefined,
error: responseError,
output_token_count: response.usageMetadata?.candidatesTokenCount ?? 0,
cached_content_token_count:
response.usageMetadata?.cachedContentTokenCount ?? 0,
thoughts_token_count: response.usageMetadata?.thoughtsTokenCount ?? 0,
tool_token_count: response.usageMetadata?.toolUsePromptTokenCount ?? 0,
});
}

View File

@ -17,5 +17,5 @@ export const METRIC_TOOL_CALL_COUNT = 'gemini_cli.tool.call.count';
export const METRIC_TOOL_CALL_LATENCY = 'gemini_cli.tool.call.latency';
export const METRIC_API_REQUEST_COUNT = 'gemini_cli.api.request.count';
export const METRIC_API_REQUEST_LATENCY = 'gemini_cli.api.request.latency';
export const METRIC_TOKEN_INPUT_COUNT = 'gemini_cli.token.input.count';
export const METRIC_TOKEN_USAGE = 'gemini_cli.token.usage';
export const METRIC_SESSION_COUNT = 'gemini_cli.session.count';

View File

@ -25,7 +25,7 @@ import {
} from './types.js';
import {
recordApiErrorMetrics,
recordApiRequestMetrics,
recordTokenUsageMetrics,
recordApiResponseMetrics,
recordToolCallMetrics,
} from './metrics.js';
@ -120,11 +120,11 @@ export function logApiRequest(
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `API request to ${event.model}. Tokens: ${event.prompt_token_count}.`,
body: `API request to ${event.model}. Tokens: ${event.input_token_count}.`,
attributes,
};
logger.emit(logRecord);
recordApiRequestMetrics(event.model, event.prompt_token_count);
recordTokenUsageMetrics(event.model, event.input_token_count, 'input');
}
export function logApiError(
@ -188,4 +188,12 @@ export function logApiResponse(
event.status_code,
event.error,
);
recordTokenUsageMetrics(event.model, event.output_token_count, 'output');
recordTokenUsageMetrics(
event.model,
event.cached_content_token_count,
'cache',
);
recordTokenUsageMetrics(event.model, event.thoughts_token_count, 'thought');
recordTokenUsageMetrics(event.model, event.tool_token_count, 'tool');
}

View File

@ -0,0 +1,86 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

// Unit tests for recordTokenUsageMetrics: verify that the shared token-usage
// counter receives each token count tagged with `model` and `type` attributes,
// and that nothing is recorded before metrics are initialized.
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
// Counter and Meter are used only as types; import them type-only so no
// runtime binding is required from the mocked '@opentelemetry/api' module.
import { type Counter, type Meter, metrics } from '@opentelemetry/api';
import { initializeMetrics, recordTokenUsageMetrics } from './metrics.js';

// Shared mock counter; captures every add() call made by the code under test.
const mockCounter = {
  add: vi.fn(),
} as unknown as Counter;

const mockMeter = {
  createCounter: vi.fn().mockReturnValue(mockCounter),
  createHistogram: vi.fn().mockReturnValue({ record: vi.fn() }),
} as unknown as Meter;

// vi.mock is hoisted above the imports, so the factory must not reference the
// mock objects declared in this module; getMeter is wired up in beforeEach.
vi.mock('@opentelemetry/api', () => ({
  metrics: {
    getMeter: vi.fn(),
  },
  ValueType: {
    INT: 1,
  },
}));

describe('Telemetry Metrics', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    (metrics.getMeter as Mock).mockReturnValue(mockMeter);
  });

  describe('recordTokenUsageMetrics', () => {
    // NOTE(review): initialization state in metrics.js is module-level and is
    // not reset between tests, so this case relies on running before any test
    // that calls initializeMetrics() — confirm test ordering stays stable.
    it('should not record metrics if not initialized', () => {
      recordTokenUsageMetrics('gemini-pro', 100, 'input');
      expect(mockCounter.add).not.toHaveBeenCalled();
    });

    it('should record token usage with the correct attributes', () => {
      initializeMetrics();
      recordTokenUsageMetrics('gemini-pro', 100, 'input');
      expect(mockCounter.add).toHaveBeenCalledWith(100, {
        model: 'gemini-pro',
        type: 'input',
      });
    });

    it('should record token usage for different types', () => {
      initializeMetrics();
      // Data-driven over each non-input token type; same calls and expected
      // attributes as before, without repeating the assert pattern.
      const cases = [
        { count: 50, type: 'output' },
        { count: 25, type: 'thought' },
        { count: 75, type: 'cache' },
        { count: 125, type: 'tool' },
      ] as const;
      for (const { count, type } of cases) {
        recordTokenUsageMetrics('gemini-pro', count, type);
        expect(mockCounter.add).toHaveBeenCalledWith(count, {
          model: 'gemini-pro',
          type,
        });
      }
    });

    it('should handle different models', () => {
      initializeMetrics();
      recordTokenUsageMetrics('gemini-ultra', 200, 'input');
      expect(mockCounter.add).toHaveBeenCalledWith(200, {
        model: 'gemini-ultra',
        type: 'input',
      });
    });
  });
});

View File

@ -18,7 +18,7 @@ import {
METRIC_TOOL_CALL_LATENCY,
METRIC_API_REQUEST_COUNT,
METRIC_API_REQUEST_LATENCY,
METRIC_TOKEN_INPUT_COUNT,
METRIC_TOKEN_USAGE,
METRIC_SESSION_COUNT,
} from './constants.js';
@ -27,7 +27,7 @@ let toolCallCounter: Counter | undefined;
let toolCallLatencyHistogram: Histogram | undefined;
let apiRequestCounter: Counter | undefined;
let apiRequestLatencyHistogram: Histogram | undefined;
let tokenInputCounter: Counter | undefined;
let tokenUsageCounter: Counter | undefined;
let isMetricsInitialized = false;
export function getMeter(): Meter | undefined {
@ -64,8 +64,8 @@ export function initializeMetrics(): void {
valueType: ValueType.INT,
},
);
tokenInputCounter = meter.createCounter(METRIC_TOKEN_INPUT_COUNT, {
description: 'Counts the total number of input tokens sent to the API.',
tokenUsageCounter = meter.createCounter(METRIC_TOKEN_USAGE, {
description: 'Counts the total number of tokens used.',
valueType: ValueType.INT,
});
@ -95,12 +95,13 @@ export function recordToolCallMetrics(
});
}
export function recordApiRequestMetrics(
export function recordTokenUsageMetrics(
model: string,
inputTokenCount: number,
tokenCount: number,
type: 'input' | 'output' | 'thought' | 'cache' | 'tool',
): void {
if (!tokenInputCounter || !isMetricsInitialized) return;
tokenInputCounter.add(inputTokenCount, { model });
if (!tokenUsageCounter || !isMetricsInitialized) return;
tokenUsageCounter.add(tokenCount, { model, type });
}
export function recordApiResponseMetrics(

View File

@ -27,7 +27,7 @@ export interface ApiRequestEvent {
'event.timestamp': string; // ISO 8601
model: string;
duration_ms: number;
prompt_token_count: number;
input_token_count: number;
}
export interface ApiErrorEvent {
@ -49,6 +49,10 @@ export interface ApiResponseEvent {
duration_ms: number;
error?: string;
attempt: number;
output_token_count: number;
cached_content_token_count: number;
thoughts_token_count: number;
tool_token_count: number;
}
export interface CliConfigEvent {