Skip to content
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions packages/cli/src/ui/components/InputPrompt.test.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,10 @@ describe('InputPrompt', () => {
await wait();

expect(props.onSubmit).toHaveBeenCalledWith('commit this');
// Enter path must NOT call buffer.insert — it passes text directly to
// handleSubmitAndClear. Calling insert would re-fill the buffer after
// it was already cleared (the microtask race bug).
expect(mockBuffer.insert).not.toHaveBeenCalled();
unmount();
});

Expand Down
6 changes: 5 additions & 1 deletion packages/cli/src/ui/components/InputPrompt.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -892,7 +892,11 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
followup.state.suggestion
) {
const text = followup.state.suggestion;
followup.accept('enter');
// Skip onAccept (buffer.insert) — we pass the text directly to
// handleSubmitAndClear which clears the buffer synchronously.
// Without skipOnAccept the microtask in accept() would re-insert
// the suggestion into the buffer after it was already cleared.
followup.accept('enter', { skipOnAccept: true });
handleSubmitAndClear(text);
return true;
}
Expand Down
5 changes: 4 additions & 1 deletion packages/cli/src/ui/hooks/useFollowupSuggestions.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,10 @@ export interface UseFollowupSuggestionsReturn {
/** Set suggestion text (called by parent component) */
setSuggestion: (text: string | null) => void;
/** Accept the current suggestion */
accept: (method?: 'tab' | 'enter' | 'right') => void;
accept: (
method?: 'tab' | 'enter' | 'right',
options?: { skipOnAccept?: boolean },
) => void;
/** Dismiss the current suggestion */
dismiss: () => void;
/** Clear all state */
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,16 @@ import {
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type OpenAI from 'openai';

vi.mock('../../telemetry/loggers.js', () => ({
logApiRequest: vi.fn(),
logApiResponse: vi.fn(),
logApiError: vi.fn(),
}));
// Partial mock: preserve the real module's other exports via importOriginal,
// but replace the three logging entry points with spies so the tests below
// can assert on exactly when telemetry is (not) emitted.
vi.mock('../../telemetry/loggers.js', async (importOriginal) => {
  const actual =
    await importOriginal<typeof import('../../telemetry/loggers.js')>();
  return {
    ...actual,
    logApiRequest: vi.fn(),
    logApiResponse: vi.fn(),
    logApiError: vi.fn(),
  };
});

vi.mock('../../utils/openaiLogger.js', () => ({
OpenAILogger: vi.fn().mockImplementation(() => ({
Expand Down Expand Up @@ -474,4 +479,91 @@ describe('LoggingContentGenerator', () => {
},
]);
});

// Internal prompt IDs ('prompt_suggestion', 'forked_query') mark
// machine-generated requests. For those, request-side telemetry and the
// OpenAI interaction log are suppressed, while response telemetry still
// fires so /stats token accounting stays accurate.
it.each(['prompt_suggestion', 'forked_query'])(
  'skips logApiRequest and OpenAI logging for internal promptId %s (generateContent)',
  async (promptId) => {
    const mockResponse = {
      responseId: 'internal-resp',
      modelVersion: 'test-model',
      candidates: [{ content: { parts: [{ text: 'suggestion' }] } }],
      usageMetadata: { promptTokenCount: 10, candidatesTokenCount: 5 },
    } as unknown as GenerateContentResponse;

    // Wrapped generator resolves immediately; only generateContent is
    // exercised in this case.
    const mockWrapped = {
      generateContent: vi.fn().mockResolvedValue(mockResponse),
      generateContentStream: vi.fn(),
    } as unknown as ContentGenerator;

    // enableOpenAILogging is ON so the test proves suppression happens
    // per-request, not because logging was globally disabled.
    const gen = new LoggingContentGenerator(mockWrapped, createConfig(), {
      model: 'test-model',
      enableOpenAILogging: true,
      openAILoggingDir: '/tmp/test-logs',
    });

    const request = {
      model: 'test-model',
      contents: [{ role: 'user', parts: [{ text: 'test' }] }],
    } as unknown as GenerateContentParameters;

    await gen.generateContent(request, promptId);

    // logApiRequest should NOT be called for internal prompts
    expect(logApiRequest).not.toHaveBeenCalled();
    // logApiResponse SHOULD be called (for /stats token tracking)
    expect(logApiResponse).toHaveBeenCalled();
    // OpenAI logger should be constructed, but no interaction should be logged
    expect(OpenAILogger).toHaveBeenCalled();
    const loggerInstance = (
      OpenAILogger as unknown as ReturnType<typeof vi.fn>
    ).mock.results[0]?.value;
    expect(loggerInstance.logInteraction).not.toHaveBeenCalled();
  },
);

// Streaming counterpart of the internal-promptId case: the whole stream is
// drained, request-side telemetry and OpenAI interaction logging stay
// suppressed, and response telemetry (fed by the chunk's usageMetadata)
// still fires for /stats.
it.each(['prompt_suggestion', 'forked_query'])(
  'skips logApiRequest and OpenAI logging for internal promptId %s (generateContentStream)',
  async (promptId) => {
    const mockChunk = {
      responseId: 'stream-resp',
      modelVersion: 'test-model',
      candidates: [{ content: { parts: [{ text: 'suggestion' }] } }],
      usageMetadata: { promptTokenCount: 10, candidatesTokenCount: 5 },
    } as unknown as GenerateContentResponse;

    // Single-chunk async generator standing in for the model stream.
    async function* fakeStream() {
      yield mockChunk;
    }

    const mockWrapped = {
      generateContent: vi.fn(),
      generateContentStream: vi.fn().mockResolvedValue(fakeStream()),
    } as unknown as ContentGenerator;

    // OpenAI logging is enabled so suppression is proven to be per-request.
    const gen = new LoggingContentGenerator(mockWrapped, createConfig(), {
      model: 'test-model',
      enableOpenAILogging: true,
      openAILoggingDir: '/tmp/test-logs',
    });

    const request = {
      model: 'test-model',
      contents: [{ role: 'user', parts: [{ text: 'test' }] }],
    } as unknown as GenerateContentParameters;

    const stream = await gen.generateContentStream(request, promptId);
    // Consume the stream — response telemetry only fires once it completes.
    for await (const _chunk of stream) {
      // drain
    }

    expect(logApiRequest).not.toHaveBeenCalled();
    expect(logApiResponse).toHaveBeenCalled();
    expect(OpenAILogger).toHaveBeenCalled();
    const loggerInstance = (
      OpenAILogger as unknown as ReturnType<typeof vi.fn>
    ).mock.results[0]?.value;
    expect(loggerInstance.logInteraction).not.toHaveBeenCalled();
  },
);
});
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ import {
logApiRequest,
logApiResponse,
} from '../../telemetry/loggers.js';
import { isInternalPromptId } from '../../utils/internalPromptIds.js';
import type {
ContentGenerator,
ContentGeneratorConfig,
Expand Down Expand Up @@ -143,8 +144,17 @@ export class LoggingContentGenerator implements ContentGenerator {
userPromptId: string,
): Promise<GenerateContentResponse> {
const startTime = Date.now();
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
const openaiRequest = await this.buildOpenAIRequestForLogging(req);
const isInternal = isInternalPromptId(userPromptId);
if (!isInternal) {
this.logApiRequest(
this.toContents(req.contents),
req.model,
userPromptId,
);
}
const openaiRequest = isInternal
? undefined
: await this.buildOpenAIRequestForLogging(req);
Comment thread
wenshao marked this conversation as resolved.
try {
const response = await this.wrapped.generateContent(req, userPromptId);
const durationMs = Date.now() - startTime;
Expand All @@ -155,12 +165,16 @@ export class LoggingContentGenerator implements ContentGenerator {
userPromptId,
response.usageMetadata,
);
await this.logOpenAIInteraction(openaiRequest, response);
if (!isInternal) {
await this.logOpenAIInteraction(openaiRequest, response);
}
return response;
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError('', durationMs, error, req.model, userPromptId);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
if (!isInternal) {
await this.logOpenAIInteraction(openaiRequest, undefined, error);
}
Comment thread
wenshao marked this conversation as resolved.
throw error;
}
}
Expand All @@ -170,16 +184,27 @@ export class LoggingContentGenerator implements ContentGenerator {
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now();
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
const openaiRequest = await this.buildOpenAIRequestForLogging(req);
const isInternal = isInternalPromptId(userPromptId);
if (!isInternal) {
this.logApiRequest(
this.toContents(req.contents),
req.model,
userPromptId,
);
}
const openaiRequest = isInternal
? undefined
: await this.buildOpenAIRequestForLogging(req);
Comment thread
wenshao marked this conversation as resolved.

let stream: AsyncGenerator<GenerateContentResponse>;
try {
stream = await this.wrapped.generateContentStream(req, userPromptId);
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError('', durationMs, error, req.model, userPromptId);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
if (!isInternal) {
await this.logOpenAIInteraction(openaiRequest, undefined, error);
}
throw error;
}

Expand All @@ -199,12 +224,27 @@ export class LoggingContentGenerator implements ContentGenerator {
model: string,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
): AsyncGenerator<GenerateContentResponse> {
const isInternal = isInternalPromptId(userPromptId);
// For internal prompts we only need the last usage metadata (for /stats);
// skip collecting full responses to avoid unnecessary memory overhead.
const responses: GenerateContentResponse[] = [];

// Track first-seen IDs so _logApiResponse/_logApiError have accurate
// values even when we skip collecting full responses for internal prompts.
let firstResponseId = '';
let firstModelVersion = '';
let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined;
try {
for await (const response of stream) {
responses.push(response);
if (!firstResponseId && response.responseId) {
firstResponseId = response.responseId;
}
if (!firstModelVersion && response.modelVersion) {
firstModelVersion = response.modelVersion;
}
if (!isInternal) {
responses.push(response);
}
if (response.usageMetadata) {
lastUsageMetadata = response.usageMetadata;
}
Expand All @@ -213,25 +253,29 @@ export class LoggingContentGenerator implements ContentGenerator {
// Only log successful API response if no error occurred
const durationMs = Date.now() - startTime;
this._logApiResponse(
responses[0]?.responseId ?? '',
firstResponseId,
durationMs,
responses[0]?.modelVersion || model,
firstModelVersion || model,
userPromptId,
lastUsageMetadata,
);
const consolidatedResponse =
this.consolidateGeminiResponsesForLogging(responses);
await this.logOpenAIInteraction(openaiRequest, consolidatedResponse);
if (!isInternal) {
const consolidatedResponse =
this.consolidateGeminiResponsesForLogging(responses);
await this.logOpenAIInteraction(openaiRequest, consolidatedResponse);
}
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(
responses[0]?.responseId ?? '',
firstResponseId,
durationMs,
error,
responses[0]?.modelVersion || model,
firstModelVersion || model,
userPromptId,
);
Comment thread
wenshao marked this conversation as resolved.
await this.logOpenAIInteraction(openaiRequest, undefined, error);
if (!isInternal) {
await this.logOpenAIInteraction(openaiRequest, undefined, error);
}
throw error;
}
}
Expand Down
31 changes: 31 additions & 0 deletions packages/core/src/followup/followupState.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,37 @@ describe('createFollowupController', () => {
ctrl.cleanup();
});

// skipOnAccept must bypass ONLY the deferred onAccept callback (which would
// re-insert the suggestion into the input buffer); state clearing and
// outcome telemetry behave exactly like a normal accept.
it('accept with skipOnAccept skips onAccept callback but still clears state and fires telemetry', async () => {
  const onStateChange = vi.fn();
  const onAccept = vi.fn();
  const onOutcome = vi.fn();
  const ctrl = createFollowupController({
    onStateChange,
    getOnAccept: () => onAccept,
    onOutcome,
  });

  ctrl.setSuggestion('run tests');
  // Advance fake timers past the suggestion show delay so it is visible.
  vi.advanceTimersByTime(300);
  onStateChange.mockClear();

  ctrl.accept('enter', { skipOnAccept: true });

  // State should be cleared
  expect(onStateChange).toHaveBeenCalledWith(INITIAL_FOLLOWUP_STATE);

  // Telemetry should still fire
  expect(onOutcome).toHaveBeenCalledWith(
    expect.objectContaining({ outcome: 'accepted', accept_method: 'enter' }),
  );

  // Flush microtask — onAccept should NOT be called
  await Promise.resolve();
  expect(onAccept).not.toHaveBeenCalled();

  ctrl.cleanup();
});

it('setSuggestion replaces a pending suggestion', () => {
const onStateChange = vi.fn();
const ctrl = createFollowupController({ onStateChange });
Expand Down
14 changes: 11 additions & 3 deletions packages/core/src/followup/followupState.ts
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,10 @@ export interface FollowupControllerActions {
/** Set suggestion text (with delayed show). Null clears immediately. */
setSuggestion: (text: string | null) => void;
/** Accept the current suggestion and invoke onAccept callback */
accept: (method?: 'tab' | 'enter' | 'right') => void;
accept: (
method?: 'tab' | 'enter' | 'right',
options?: { skipOnAccept?: boolean },
) => void;
/** Dismiss/clear suggestion */
dismiss: () => void;
/** Hard-clear all state and timers */
Expand Down Expand Up @@ -135,7 +138,10 @@ export function createFollowupController(
}, SUGGESTION_DELAY_MS);
};

const accept = (method?: 'tab' | 'enter' | 'right'): void => {
const accept = (
method?: 'tab' | 'enter' | 'right',
options?: { skipOnAccept?: boolean },
): void => {
if (accepting) {
return;
}
Expand Down Expand Up @@ -170,7 +176,9 @@ export function createFollowupController(

queueMicrotask(() => {
try {
getOnAccept?.()?.(text);
if (!options?.skipOnAccept) {
getOnAccept?.()?.(text);
}
} catch (error: unknown) {
// eslint-disable-next-line no-console
console.error('[followup] onAccept callback threw:', error);
Expand Down
Loading
Loading