
Commit 1aa4a32

test(core): improve testing for API request/response parsing (#21227)
1 parent 22d962e commit 1aa4a32

3 files changed: 252 additions & 1 deletion

Lines changed: 1 addition & 0 deletions

@@ -0,0 +1 @@
+{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"Part 1. "}],"role":"model"},"index":0}]},{"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":10,"totalTokenCount":110}},{"candidates":[{"content":{"parts":[{"text":"Part 2."}],"role":"model"},"index":0}],"finishReason":"STOP"}]}
Lines changed: 50 additions & 0 deletions

@@ -0,0 +1,50 @@
+/**
+ * @license
+ * Copyright 2026 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { describe, it, expect, beforeEach, afterEach } from 'vitest';
+import { TestRig } from './test-helper.js';
+import { join, dirname } from 'node:path';
+import { fileURLToPath } from 'node:url';
+
+describe('API Resilience E2E', () => {
+  let rig: TestRig;
+
+  beforeEach(() => {
+    rig = new TestRig();
+  });
+
+  afterEach(async () => {
+    await rig.cleanup();
+  });
+
+  it('should not crash when receiving metadata-only chunks in a stream', async () => {
+    await rig.setup('api-resilience-metadata-only', {
+      fakeResponsesPath: join(
+        dirname(fileURLToPath(import.meta.url)),
+        'api-resilience.responses',
+      ),
+      settings: {
+        planSettings: { modelRouting: false },
+      },
+    });
+
+    // Run the CLI with a simple prompt.
+    // The fake responses will provide a stream with a metadata-only chunk in the middle.
+    // We use gemini-3-pro-preview to minimize internal service calls.
+    const result = await rig.run({
+      args: ['hi', '--model', 'gemini-3-pro-preview'],
+    });
+
+    // Verify the output contains text from the normal chunks.
+    // If the CLI crashed on the metadata chunk, rig.run would throw.
+    expect(result).toContain('Part 1.');
+    expect(result).toContain('Part 2.');
+
+    // Verify telemetry event for the prompt was still generated
+    const hasUserPromptEvent = await rig.waitForTelemetryEvent('user_prompt');
+    expect(hasUserPromptEvent).toBe(true);
+  });
+});
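
The test points the rig at the fixture via fakeResponsesPath and then only asserts that text from the surrounding chunks made it to the output. Purely as an illustration of the replay idea (this is not the actual TestRig implementation), a fake backend only needs to yield the recorded chunks in order, which is enough to reproduce the metadata-only case:

// Illustrative sketch only; the real replay logic lives in the test harness.
async function* replayRecordedStream(recorded: {
  method: string;
  response: Array<Record<string, unknown>>;
}): AsyncGenerator<Record<string, unknown>> {
  for (const chunk of recorded.response) {
    yield chunk; // consumers must tolerate chunks that have no `candidates`
  }
}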

packages/core/src/code_assist/server.test.ts

Lines changed: 201 additions & 1 deletion
@@ -7,7 +7,14 @@
 import { beforeEach, describe, it, expect, vi, afterEach } from 'vitest';
 import { CodeAssistServer } from './server.js';
 import { OAuth2Client } from 'google-auth-library';
-import { UserTierId, ActionStatus } from './types.js';
+import {
+  UserTierId,
+  ActionStatus,
+  type LoadCodeAssistResponse,
+  type GeminiUserTier,
+  type SetCodeAssistGlobalUserSettingRequest,
+  type CodeAssistGlobalUserSettingResponse,
+} from './types.js';
 import { FinishReason } from '@google/genai';
 import { LlmRole } from '../telemetry/types.js';
 import { logInvalidChunk } from '../telemetry/loggers.js';
@@ -678,6 +685,85 @@ describe('CodeAssistServer', () => {
     expect(response).toEqual(mockResponse);
   });

+  it('should call fetchAdminControls endpoint', async () => {
+    const { server } = createTestServer();
+    const mockResponse = { adminControlsApplicable: true };
+    const requestPostSpy = vi
+      .spyOn(server, 'requestPost')
+      .mockResolvedValue(mockResponse);
+
+    const req = { project: 'test-project' };
+    const response = await server.fetchAdminControls(req);
+
+    expect(requestPostSpy).toHaveBeenCalledWith('fetchAdminControls', req);
+    expect(response).toEqual(mockResponse);
+  });
+
+  it('should call getCodeAssistGlobalUserSetting endpoint', async () => {
+    const { server } = createTestServer();
+    const mockResponse: CodeAssistGlobalUserSettingResponse = {
+      freeTierDataCollectionOptin: true,
+    };
+    const requestGetSpy = vi
+      .spyOn(server, 'requestGet')
+      .mockResolvedValue(mockResponse);
+
+    const response = await server.getCodeAssistGlobalUserSetting();
+
+    expect(requestGetSpy).toHaveBeenCalledWith(
+      'getCodeAssistGlobalUserSetting',
+    );
+    expect(response).toEqual(mockResponse);
+  });
+
+  it('should call setCodeAssistGlobalUserSetting endpoint', async () => {
+    const { server } = createTestServer();
+    const mockResponse: CodeAssistGlobalUserSettingResponse = {
+      freeTierDataCollectionOptin: true,
+    };
+    const requestPostSpy = vi
+      .spyOn(server, 'requestPost')
+      .mockResolvedValue(mockResponse);
+
+    const req: SetCodeAssistGlobalUserSettingRequest = {
+      freeTierDataCollectionOptin: true,
+    };
+    const response = await server.setCodeAssistGlobalUserSetting(req);
+
+    expect(requestPostSpy).toHaveBeenCalledWith(
+      'setCodeAssistGlobalUserSetting',
+      req,
+    );
+    expect(response).toEqual(mockResponse);
+  });
+
+  it('should call loadCodeAssist during refreshAvailableCredits', async () => {
+    const { server } = createTestServer();
+    const mockPaidTier = {
+      id: 'test-tier',
+      name: 'tier',
+      availableCredits: [{ creditType: 'G1', creditAmount: '50' }],
+    };
+    const mockResponse = { paidTier: mockPaidTier };
+
+    vi.spyOn(server, 'loadCodeAssist').mockResolvedValue(
+      mockResponse as unknown as LoadCodeAssistResponse,
+    );
+
+    // Initial state: server has a paidTier without availableCredits
+    (server as unknown as { paidTier: GeminiUserTier }).paidTier = {
+      id: 'test-tier',
+      name: 'tier',
+    };
+
+    await server.refreshAvailableCredits();
+
+    expect(server.loadCodeAssist).toHaveBeenCalled();
+    expect(server.paidTier?.availableCredits).toEqual(
+      mockPaidTier.availableCredits,
+    );
+  });
+
   describe('robustness testing', () => {
     it('should not crash on random error objects in loadCodeAssist (isVpcScAffectedUser)', async () => {
       const { server } = createTestServer();
@@ -867,6 +953,46 @@ data: ${jsonString}
       );
     });

+    it('should handle malformed JSON within a multi-line data block', async () => {
+      const config = makeFakeConfig();
+      const mockRequest = vi.fn();
+      const client = { request: mockRequest } as unknown as OAuth2Client;
+      const server = new CodeAssistServer(
+        client,
+        'test-project',
+        {},
+        'test-session',
+        UserTierId.FREE,
+        undefined,
+        undefined,
+        config,
+      );
+
+      const { Readable } = await import('node:stream');
+      const mockStream = new Readable({
+        read() {},
+      });
+
+      mockRequest.mockResolvedValue({ data: mockStream });
+
+      const stream = await server.requestStreamingPost('testStream', {});
+
+      setTimeout(() => {
+        mockStream.push('data: {\n');
+        mockStream.push('data: "invalid": json\n');
+        mockStream.push('data: }\n\n');
+        mockStream.push(null);
+      }, 0);
+
+      const results = [];
+      for await (const res of stream) {
+        results.push(res);
+      }
+
+      expect(results).toHaveLength(0);
+      expect(logInvalidChunk).toHaveBeenCalled();
+    });
+
     it('should safely process random response streams in generateContentStream (consumed/remaining credits)', async () => {
       const { mockRequest, client } = createTestServer();
       const testServer = new CodeAssistServer(
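
The malformed-JSON test in the hunk above splits one server-sent event across three `data:` lines. The usual SSE convention, which is presumably what the parser under test follows, is to buffer the `data:` payloads of a block until the blank line that terminates the event, parse the joined payload once, and log and drop the event when parsing fails. A rough sketch of that technique under those assumptions (parseSseEvents is hypothetical, not the code in server.ts):

function parseSseEvents(raw: string): unknown[] {
  const events: unknown[] = [];
  for (const block of raw.split('\n\n')) {
    // Collect the payload of every `data:` line in this event block.
    const payload = block
      .split('\n')
      .filter((line) => line.startsWith('data: '))
      .map((line) => line.slice('data: '.length))
      .join('\n');
    if (!payload) continue;
    try {
      events.push(JSON.parse(payload));
    } catch {
      // The real parser would record this via logInvalidChunk and keep going.
    }
  }
  return events;
}

// The block pushed by the test parses to nothing, so no chunk is yielded:
// parseSseEvents('data: {\ndata: "invalid": json\ndata: }\n\n') → []
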
@@ -914,5 +1040,79 @@ data: ${jsonString}
       }
       // Should not crash
     });
+
+    it('should be resilient to metadata-only chunks without candidates in generateContentStream', async () => {
+      const { mockRequest, client } = createTestServer();
+      const testServer = new CodeAssistServer(
+        client,
+        'test-project',
+        {},
+        'test-session',
+        UserTierId.FREE,
+      );
+      const { Readable } = await import('node:stream');
+
+      // Chunk 2 is metadata-only, no candidates
+      const streamResponses = [
+        {
+          traceId: '1',
+          response: {
+            candidates: [{ content: { parts: [{ text: 'Hello' }] }, index: 0 }],
+          },
+        },
+        {
+          traceId: '2',
+          consumedCredits: [{ creditType: 'GOOGLE_ONE_AI', creditAmount: '5' }],
+          response: {
+            usageMetadata: { promptTokenCount: 10, totalTokenCount: 15 },
+          },
+        },
+        {
+          traceId: '3',
+          response: {
+            candidates: [
+              { content: { parts: [{ text: ' World' }] }, index: 0 },
+            ],
+          },
+        },
+      ];
+
+      const mockStream = new Readable({
+        read() {
+          for (const resp of streamResponses) {
+            this.push(`data: ${JSON.stringify(resp)}\n\n`);
+          }
+          this.push(null);
+        },
+      });
+      mockRequest.mockResolvedValueOnce({ data: mockStream });
+      vi.spyOn(testServer, 'recordCodeAssistMetrics').mockResolvedValue(
+        undefined,
+      );
+
+      const stream = await testServer.generateContentStream(
+        { model: 'test-model', contents: [] },
+        'user-prompt-id',
+        LlmRole.MAIN,
+      );
+
+      const results = [];
+      for await (const res of stream) {
+        results.push(res);
+      }
+
+      expect(results).toHaveLength(3);
+      expect(results[0].candidates).toHaveLength(1);
+      expect(results[0].candidates?.[0].content?.parts?.[0].text).toBe('Hello');
+
+      // Chunk 2 (metadata-only) should still be yielded but with empty candidates
+      expect(results[1].candidates).toHaveLength(0);
+      expect(results[1].usageMetadata?.promptTokenCount).toBe(10);
+
+      expect(results[2].candidates).toHaveLength(1);
+      expect(results[2].candidates?.[0].content?.parts?.[0].text).toBe(
+        ' World',
+      );
+    });
   });
 });
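
The final test pins down the contract for metadata-only chunks: they are still yielded, just with an empty candidates array, so usage metadata keeps flowing without crashing consumers that index into candidates. A minimal sketch of that mapping, using stand-in types rather than the actual server.ts code:

// Stand-in types; the real chunk and response types live in the code_assist package.
interface RawStreamChunk {
  response?: {
    candidates?: unknown[];
    usageMetadata?: { promptTokenCount?: number; totalTokenCount?: number };
  };
}

interface YieldedChunk {
  candidates: unknown[];
  usageMetadata?: { promptTokenCount?: number; totalTokenCount?: number };
}

function toYieldedChunk(chunk: RawStreamChunk): YieldedChunk {
  return {
    // A metadata-only chunk has no candidates; surface it as an empty array.
    candidates: chunk.response?.candidates ?? [],
    usageMetadata: chunk.response?.usageMetadata,
  };
}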
