Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion packages/cli/src/gemini.test.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -985,7 +985,10 @@ describe('gemini.tsx main function exit codes', () => {
vi.restoreAllMocks();
});

it('should exit with 42 for invalid input combination (prompt-interactive with non-TTY)', async () => {
it('should handle prompt-interactive with non-TTY without crashing', async () => {
// -i without TTY now falls through to headless interactive path.
// With mocked config (isInteractive=false), it exits with 42 (no input)
// because the mock doesn't derive interactive from argv.
vi.mocked(loadCliConfig).mockResolvedValue(createMockConfig());
vi.mocked(loadSettings).mockReturnValue(
createMockSettings({
Expand Down
101 changes: 58 additions & 43 deletions packages/cli/src/gemini.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ import {
import { loadCliConfig, parseArguments } from './config/config.js';
import * as cliConfig from './config/config.js';
import { readStdin } from './utils/readStdin.js';
import { readStdinLines } from './utils/readStdinLines.js';
import { createHash } from 'node:crypto';
import v8 from 'node:v8';
import os from 'node:os';
Expand Down Expand Up @@ -268,15 +269,6 @@ export async function main() {
});
}

// Check for invalid input combinations early to prevent crashes
if (argv.promptInteractive && !process.stdin.isTTY) {
writeToStderr(
'Error: The --prompt-interactive flag cannot be used when input is piped from stdin.\n',
);
await runExitCleanup();
process.exit(ExitCodes.FATAL_INPUT_ERROR);
}

const isDebugMode = cliConfig.isDebugMode(argv);
const consolePatcher = new ConsolePatcher({
stderr: true,
Expand Down Expand Up @@ -387,7 +379,7 @@ export async function main() {
process.exit(ExitCodes.FATAL_AUTHENTICATION_ERROR);
}
let stdinData = '';
if (!process.stdin.isTTY) {
if (!process.stdin.isTTY && !argv.promptInteractive) {
stdinData = await readStdin();
}

Expand Down Expand Up @@ -596,7 +588,7 @@ export async function main() {

cliStartupHandle?.end();
// Render UI, passing necessary config values. Check that there is no command line question.
if (config.isInteractive()) {
if (config.isInteractive() && process.stdin.isTTY) {
await startInteractiveUI(
config,
settings,
Expand All @@ -607,15 +599,13 @@ export async function main() {
);
return;
}

await config.initialize();
startupProfiler.flush(config);

// If not a TTY, read from stdin
// This is for cases where the user pipes input directly into the command
let stdinData: string | undefined = undefined;
if (!process.stdin.isTTY) {
stdinData = await readStdin();
// If not a TTY, read from stdin as a single prompt.
// Skip in interactive mode (-i) — stdin will be read line-by-line.
if (!process.stdin.isTTY && !config.isInteractive()) {
const stdinData = await readStdin();
if (stdinData) {
input = input ? `${stdinData}\n\n${input}` : stdinData;
}
Expand Down Expand Up @@ -649,25 +639,6 @@ export async function main() {
await config.getHookSystem()?.fireSessionEndEvent(SessionEndReason.Exit);
});

if (!input) {
debugLogger.error(
`No input provided via stdin. Input can be provided by piping data into gemini or using the --prompt option.`,
);
await runExitCleanup();
process.exit(ExitCodes.FATAL_INPUT_ERROR);
}

const prompt_id = sessionId;
logUserPrompt(
config,
new UserPromptEvent(
input.length,
prompt_id,
config.getContentGeneratorConfig()?.authType,
input,
),
);

const authType = await validateNonInteractiveAuth(
settings.merged.security.auth.selectedType,
settings.merged.security.auth.useExternal,
Expand All @@ -682,19 +653,63 @@ export async function main() {

initializeOutputListenersAndFlush();

await runNonInteractive({
config,
settings,
input,
prompt_id,
resumedSessionData,
});
// Unified prompt loop: yields once for single-shot, multiple times for
// multi-turn pipe sessions. RESULT events signal each response end.
let promptCount = 0;
for await (const prompt of prompts(input, config.isInteractive())) {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the new piped stdin path has no error handling for broken pipes.

Since this PR specifically targets long-running programmatic sessions, the pipe can break at any point - e.g., the parent process crashes, or the network drops in a remote session.

The existing readStdin.ts already handles this with an explicit onError callback (lines 53-56) and a safety net for late errors (lines 70-72). But the new readStdinLines.ts uses a bare for await...of at line 49 with no error handling:

// readStdinLines.ts, line 49 — throws if the stream emits an error
for await (const chunk of stream) {

The consumer in gemini.tsx also has no try-catch around the loop at line 659:

// gemini.tsx, line 659 — no try-catch
for await (const prompt of prompts(input, config.isInteractive())) {

So when the pipe breaks, the error flies past the cleanup code at lines 690-692 (runExitCleanup() + process.exit), which means telemetry doesn't flush, SessionEnd hooks don't fire, and temp files aren't cleaned up.

A lightweight fix would be wrapping the loop in gemini.tsx:

try {
  for await (const prompt of prompts(input, config.isInteractive())) {
    promptCount++;
    // ... existing loop body ...
  }
} catch (err) {
  debugLogger.error('Piped stdin stream error:', err);
}
// Cleanup now runs normally regardless of pipe errors
if (promptCount === 0) { ... }
await runExitCleanup();
process.exit(ExitCodes.SUCCESS);

This way, a broken pipe logs the error and falls through to the existing cleanup path - same graceful behavior that readStdin.ts already provides.

promptCount++;
const prompt_id =
promptCount === 1 ? sessionId : `${sessionId}-${promptCount}`;
logUserPrompt(
config,
new UserPromptEvent(
prompt.length,
prompt_id,
config.getContentGeneratorConfig()?.authType,
prompt,
),
);

await runNonInteractive({
config,
settings,
input: prompt,
prompt_id,
resumedSessionData: promptCount === 1 ? resumedSessionData : undefined,
});
}

if (promptCount === 0) {
debugLogger.error(
`No input provided via stdin. Input can be provided by piping data into gemini or using the --prompt option.`,
);
await runExitCleanup();
process.exit(ExitCodes.FATAL_INPUT_ERROR);
}

// Call cleanup before process.exit, which causes cleanup to not run
await runExitCleanup();
process.exit(ExitCodes.SUCCESS);
}
}

/**
 * Produces the sequence of prompts for a session.
 * - Single-shot (interactive=false): the provided input is the only prompt.
 * - Multi-turn (interactive=true): the initial input (if any) is yielded
 *   first, then stdin is consumed line-by-line until the pipe closes.
 */
async function* prompts(
  input?: string,
  interactive?: boolean,
): AsyncGenerator<string> {
  if (input) {
    yield input;
  }
  // Follow-up prompts are only streamed for multi-turn pipe sessions:
  // interactive mode requested AND stdin is a pipe rather than a terminal.
  const streamStdin = Boolean(interactive) && !process.stdin.isTTY;
  if (streamStdin) {
    yield* readStdinLines();
  }
}

export function initializeOutputListenersAndFlush() {
// If there are no listeners for output, make sure we flush so output is not
// lost.
Expand Down
138 changes: 138 additions & 0 deletions packages/cli/src/utils/readStdinLines.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

import { vi, describe, expect, it } from 'vitest';
import { readStdinLines } from './readStdinLines.js';
import { PassThrough } from 'node:stream';

// Replace the core package with a stub so the module under test gets a
// no-op debugLogger (presumably used to warn when input is truncated or
// dropped — verify against readStdinLines.ts). NOTE: Vitest hoists
// vi.mock calls above the file's imports, so the stub is in place before
// readStdinLines is loaded.
vi.mock('@google/gemini-cli-core', () => ({
  debugLogger: {
    warn: vi.fn(),
  },
}));

/** Helper: drain an async generator, returning every yielded value in order. */
async function collect(gen: AsyncGenerator<string>): Promise<string[]> {
  const values: string[] = [];
  let step = await gen.next();
  while (!step.done) {
    values.push(step.value);
    step = await gen.next();
  }
  return values;
}

/** Helper: build an already-ended PassThrough preloaded with the given chunks. */
function createStream(lines: string[]): PassThrough {
  const stream = new PassThrough();
  lines.forEach((chunk) => stream.write(chunk));
  stream.end();
  return stream;
}

describe('readStdinLines', () => {
  /** Pipe the given pre-written chunks through the generator and gather output. */
  const run = (chunks: string[]) => collect(readStdinLines(createStream(chunks)));

  it('should yield each non-empty line from piped input', async () => {
    expect(await run(['hello\n', 'world\n'])).toEqual(['hello', 'world']);
  });

  it('should skip empty lines', async () => {
    expect(await run(['hello\n', '\n', '\n', 'world\n'])).toEqual([
      'hello',
      'world',
    ]);
  });

  it('should trim whitespace from lines', async () => {
    expect(await run([' hello \n', ' world \n'])).toEqual(['hello', 'world']);
  });

  it('should handle input without trailing newline', async () => {
    expect(await run(['hello\n', 'world'])).toEqual(['hello', 'world']);
  });

  it('should yield nothing for empty stream', async () => {
    expect(await run([])).toEqual([]);
  });

  it('should handle multi-byte UTF-8 characters (CJK)', async () => {
    expect(await run(['한글 테스트\n', '日本語\n'])).toEqual([
      '한글 테스트',
      '日本語',
    ]);
  });

  it('should handle emoji (4-byte UTF-8)', async () => {
    expect(await run(['hello 😀🎉\n', 'world 🚀\n'])).toEqual([
      'hello 😀🎉',
      'world 🚀',
    ]);
  });

  it('should handle chunks split across multiple writes', async () => {
    // Lines deliberately split mid-word across write() calls.
    const stream = new PassThrough();
    stream.write('hel');
    stream.write('lo\nwor');
    stream.write('ld\n');
    stream.end();
    expect(await collect(readStdinLines(stream))).toEqual(['hello', 'world']);
  });

  it('should stop reading when total size exceeds cumulative limit', async () => {
    // Push ten ~1MB lines with known byte sizes so the cumulative
    // 8MB cap is guaranteed to fire partway through the stream.
    const megabyteLine = 'a'.repeat(1024 * 1024) + '\n';
    const got = await run(Array.from({ length: 10 }, () => megabyteLine));
    // 8 lines of ~1MB each should fit; the 9th should be rejected
    expect(got.length).toBeLessThanOrEqual(8);
    expect(got.length).toBeGreaterThanOrEqual(7);
  });

  it('should truncate oversized lines at valid UTF-8 boundary', async () => {
    // A single line of ~9MB built from 3-byte Korean characters.
    const got = await run(['한'.repeat(3 * 1024 * 1024) + '\n']);
    expect(got.length).toBe(1);
    // Truncation must land on a character boundary and stay within 8MB.
    expect(Buffer.byteLength(got[0], 'utf8')).toBeLessThanOrEqual(
      8 * 1024 * 1024,
    );
    // No replacement characters means no character was split mid-sequence.
    expect(got[0]).not.toContain('\uFFFD');
  });

  it('should handle oversized buffer without newline (flush path)', async () => {
    // >8MB with no newline at all exercises the end-of-stream flush path.
    const got = await run(['x'.repeat(9 * 1024 * 1024)]);
    expect(got.length).toBe(1);
    expect(Buffer.byteLength(got[0], 'utf8')).toBeLessThanOrEqual(
      8 * 1024 * 1024,
    );
  });

  it('should track totalSize consistently with post-truncation bytes', async () => {
    // After the ~9MB line is truncated to the 8MB cap, zero budget remains,
    // so the following small line must be dropped.
    const got = await run(['한'.repeat(3 * 1024 * 1024) + '\n', 'hello\n']);
    expect(got.length).toBe(1);
  });
});
Loading