Skip to content

Commit 4e0f630

Browse files
authored
test(vertexai): add backendName param to mock response getters (#8906)
`backendName` can be either `googleAI` or `vertexAI`. This can be passed to `getMockResponse` or `getMockResponseStreaming` to specify whether the mock response lookup should be done in the set of vertexAI or googleAI mock files. Also modified the `convert-mocks.ts` script to read mock responses from the 'developerapi' directory, and added an export to the generated file for the new lookup object containing those mock responses.
1 parent b332825 commit 4e0f630

9 files changed

+186
-46
lines changed

packages/vertexai/src/methods/count-tokens.test.ts

+10-2
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,10 @@ describe('countTokens()', () => {
4545
restore();
4646
});
4747
it('total tokens', async () => {
48-
const mockResponse = getMockResponse('unary-success-total-tokens.json');
48+
const mockResponse = getMockResponse(
49+
'vertexAI',
50+
'unary-success-total-tokens.json'
51+
);
4952
const makeRequestStub = stub(request, 'makeRequest').resolves(
5053
mockResponse as Response
5154
);
@@ -69,6 +72,7 @@ describe('countTokens()', () => {
6972
});
7073
it('total tokens with modality details', async () => {
7174
const mockResponse = getMockResponse(
75+
'vertexAI',
7276
'unary-success-detailed-token-response.json'
7377
);
7478
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -96,6 +100,7 @@ describe('countTokens()', () => {
96100
});
97101
it('total tokens no billable characters', async () => {
98102
const mockResponse = getMockResponse(
103+
'vertexAI',
99104
'unary-success-no-billable-characters.json'
100105
);
101106
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -120,7 +125,10 @@ describe('countTokens()', () => {
120125
);
121126
});
122127
it('model not found', async () => {
123-
const mockResponse = getMockResponse('unary-failure-model-not-found.json');
128+
const mockResponse = getMockResponse(
129+
'vertexAI',
130+
'unary-failure-model-not-found.json'
131+
);
124132
const mockFetch = stub(globalThis, 'fetch').resolves({
125133
ok: false,
126134
status: 404,

packages/vertexai/src/methods/generate-content.test.ts

+22-4
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ describe('generateContent()', () => {
6161
});
6262
it('short response', async () => {
6363
const mockResponse = getMockResponse(
64+
'vertexAI',
6465
'unary-success-basic-reply-short.json'
6566
);
6667
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -84,7 +85,10 @@ describe('generateContent()', () => {
8485
);
8586
});
8687
it('long response', async () => {
87-
const mockResponse = getMockResponse('unary-success-basic-reply-long.json');
88+
const mockResponse = getMockResponse(
89+
'vertexAI',
90+
'unary-success-basic-reply-long.json'
91+
);
8892
const makeRequestStub = stub(request, 'makeRequest').resolves(
8993
mockResponse as Response
9094
);
@@ -105,6 +109,7 @@ describe('generateContent()', () => {
105109
});
106110
it('long response with token details', async () => {
107111
const mockResponse = getMockResponse(
112+
'vertexAI',
108113
'unary-success-basic-response-long-usage-metadata.json'
109114
);
110115
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -138,7 +143,10 @@ describe('generateContent()', () => {
138143
);
139144
});
140145
it('citations', async () => {
141-
const mockResponse = getMockResponse('unary-success-citations.json');
146+
const mockResponse = getMockResponse(
147+
'vertexAI',
148+
'unary-success-citations.json'
149+
);
142150
const makeRequestStub = stub(request, 'makeRequest').resolves(
143151
mockResponse as Response
144152
);
@@ -163,6 +171,7 @@ describe('generateContent()', () => {
163171
});
164172
it('blocked prompt', async () => {
165173
const mockResponse = getMockResponse(
174+
'vertexAI',
166175
'unary-failure-prompt-blocked-safety.json'
167176
);
168177
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -184,6 +193,7 @@ describe('generateContent()', () => {
184193
});
185194
it('finishReason safety', async () => {
186195
const mockResponse = getMockResponse(
196+
'vertexAI',
187197
'unary-failure-finish-reason-safety.json'
188198
);
189199
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -204,7 +214,10 @@ describe('generateContent()', () => {
204214
);
205215
});
206216
it('empty content', async () => {
207-
const mockResponse = getMockResponse('unary-failure-empty-content.json');
217+
const mockResponse = getMockResponse(
218+
'vertexAI',
219+
'unary-failure-empty-content.json'
220+
);
208221
const makeRequestStub = stub(request, 'makeRequest').resolves(
209222
mockResponse as Response
210223
);
@@ -224,6 +237,7 @@ describe('generateContent()', () => {
224237
});
225238
it('unknown enum - should ignore', async () => {
226239
const mockResponse = getMockResponse(
240+
'vertexAI',
227241
'unary-success-unknown-enum-safety-ratings.json'
228242
);
229243
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -244,7 +258,10 @@ describe('generateContent()', () => {
244258
);
245259
});
246260
it('image rejected (400)', async () => {
247-
const mockResponse = getMockResponse('unary-failure-image-rejected.json');
261+
const mockResponse = getMockResponse(
262+
'vertexAI',
263+
'unary-failure-image-rejected.json'
264+
);
248265
const mockFetch = stub(globalThis, 'fetch').resolves({
249266
ok: false,
250267
status: 400,
@@ -257,6 +274,7 @@ describe('generateContent()', () => {
257274
});
258275
it('api not enabled (403)', async () => {
259276
const mockResponse = getMockResponse(
277+
'vertexAI',
260278
'unary-failure-firebasevertexai-api-not-enabled.json'
261279
);
262280
const mockFetch = stub(globalThis, 'fetch').resolves({

packages/vertexai/src/models/generative-model.test.ts

+10-1
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ describe('GenerativeModel', () => {
6060
);
6161
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
6262
const mockResponse = getMockResponse(
63+
'vertexAI',
6364
'unary-success-basic-reply-short.json'
6465
);
6566
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -89,6 +90,7 @@ describe('GenerativeModel', () => {
8990
});
9091
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
9192
const mockResponse = getMockResponse(
93+
'vertexAI',
9294
'unary-success-basic-reply-short.json'
9395
);
9496
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -129,6 +131,7 @@ describe('GenerativeModel', () => {
129131
);
130132
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
131133
const mockResponse = getMockResponse(
134+
'vertexAI',
132135
'unary-success-basic-reply-short.json'
133136
);
134137
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -177,6 +180,7 @@ describe('GenerativeModel', () => {
177180
);
178181
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
179182
const mockResponse = getMockResponse(
183+
'vertexAI',
180184
'unary-success-basic-reply-short.json'
181185
);
182186
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -206,6 +210,7 @@ describe('GenerativeModel', () => {
206210
});
207211
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
208212
const mockResponse = getMockResponse(
213+
'vertexAI',
209214
'unary-success-basic-reply-short.json'
210215
);
211216
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -239,6 +244,7 @@ describe('GenerativeModel', () => {
239244
);
240245
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
241246
const mockResponse = getMockResponse(
247+
'vertexAI',
242248
'unary-success-basic-reply-short.json'
243249
);
244250
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -277,7 +283,10 @@ describe('GenerativeModel', () => {
277283
});
278284
it('calls countTokens', async () => {
279285
const genModel = new GenerativeModel(fakeVertexAI, { model: 'my-model' });
280-
const mockResponse = getMockResponse('unary-success-total-tokens.json');
286+
const mockResponse = getMockResponse(
287+
'vertexAI',
288+
'unary-success-total-tokens.json'
289+
);
281290
const makeRequestStub = stub(request, 'makeRequest').resolves(
282291
mockResponse as Response
283292
);

packages/vertexai/src/models/imagen-model.test.ts

+3
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ const fakeVertexAI: VertexAI = {
4747
describe('ImagenModel', () => {
4848
it('generateImages makes a request to predict with default parameters', async () => {
4949
const mockResponse = getMockResponse(
50+
'vertexAI',
5051
'unary-success-generate-images-base64.json'
5152
);
5253
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -90,6 +91,7 @@ describe('ImagenModel', () => {
9091
});
9192

9293
const mockResponse = getMockResponse(
94+
'vertexAI',
9395
'unary-success-generate-images-base64.json'
9496
);
9597
const makeRequestStub = stub(request, 'makeRequest').resolves(
@@ -133,6 +135,7 @@ describe('ImagenModel', () => {
133135
});
134136
it('throws if prompt blocked', async () => {
135137
const mockResponse = getMockResponse(
138+
'vertexAI',
136139
'unary-failure-generate-images-prompt-blocked.json'
137140
);
138141

packages/vertexai/src/requests/request.test.ts

+1
Original file line numberDiff line numberDiff line change
@@ -414,6 +414,7 @@ describe('request methods', () => {
414414
});
415415
it('Network error, API not enabled', async () => {
416416
const mockResponse = getMockResponse(
417+
'vertexAI',
417418
'unary-failure-firebasevertexai-api-not-enabled.json'
418419
);
419420
const fetchStub = stub(globalThis, 'fetch').resolves(

packages/vertexai/src/requests/response-helpers.test.ts

+5
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,7 @@ describe('response-helpers methods', () => {
257257
describe('handlePredictResponse', () => {
258258
it('returns base64 images', async () => {
259259
const mockResponse = getMockResponse(
260+
'vertexAI',
260261
'unary-success-generate-images-base64.json'
261262
) as Response;
262263
const res = await handlePredictResponse<ImagenInlineImage>(mockResponse);
@@ -270,6 +271,7 @@ describe('response-helpers methods', () => {
270271
});
271272
it('returns GCS images', async () => {
272273
const mockResponse = getMockResponse(
274+
'vertexAI',
273275
'unary-success-generate-images-gcs.json'
274276
) as Response;
275277
const res = await handlePredictResponse<ImagenGCSImage>(mockResponse);
@@ -284,6 +286,7 @@ describe('response-helpers methods', () => {
284286
});
285287
it('has filtered reason and no images if all images were filtered', async () => {
286288
const mockResponse = getMockResponse(
289+
'vertexAI',
287290
'unary-failure-generate-images-all-filtered.json'
288291
) as Response;
289292
const res = await handlePredictResponse<ImagenInlineImage>(mockResponse);
@@ -294,6 +297,7 @@ describe('response-helpers methods', () => {
294297
});
295298
it('has filtered reason and no images if all base64 images were filtered', async () => {
296299
const mockResponse = getMockResponse(
300+
'vertexAI',
297301
'unary-failure-generate-images-base64-some-filtered.json'
298302
) as Response;
299303
const res = await handlePredictResponse<ImagenInlineImage>(mockResponse);
@@ -308,6 +312,7 @@ describe('response-helpers methods', () => {
308312
});
309313
it('has filtered reason and no images if all GCS images were filtered', async () => {
310314
const mockResponse = getMockResponse(
315+
'vertexAI',
311316
'unary-failure-generate-images-gcs-some-filtered.json'
312317
) as Response;
313318
const res = await handlePredictResponse<ImagenGCSImage>(mockResponse);

packages/vertexai/src/requests/stream-reader.test.ts

+15-1
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ describe('processStream', () => {
7272
});
7373
it('streaming response - short', async () => {
7474
const fakeResponse = getMockResponseStreaming(
75+
'vertexAI',
7576
'streaming-success-basic-reply-short.txt'
7677
);
7778
const result = processStream(fakeResponse as Response);
@@ -83,6 +84,7 @@ describe('processStream', () => {
8384
});
8485
it('streaming response - long', async () => {
8586
const fakeResponse = getMockResponseStreaming(
87+
'vertexAI',
8688
'streaming-success-basic-reply-long.txt'
8789
);
8890
const result = processStream(fakeResponse as Response);
@@ -95,6 +97,7 @@ describe('processStream', () => {
9597
});
9698
it('streaming response - long - big chunk', async () => {
9799
const fakeResponse = getMockResponseStreaming(
100+
'vertexAI',
98101
'streaming-success-basic-reply-long.txt',
99102
1e6
100103
);
@@ -107,7 +110,10 @@ describe('processStream', () => {
107110
expect(aggregatedResponse.text()).to.include('to their owners.');
108111
});
109112
it('streaming response - utf8', async () => {
110-
const fakeResponse = getMockResponseStreaming('streaming-success-utf8.txt');
113+
const fakeResponse = getMockResponseStreaming(
114+
'vertexAI',
115+
'streaming-success-utf8.txt'
116+
);
111117
const result = processStream(fakeResponse as Response);
112118
for await (const response of result.stream) {
113119
expect(response.text()).to.not.be.empty;
@@ -118,6 +124,7 @@ describe('processStream', () => {
118124
});
119125
it('streaming response - functioncall', async () => {
120126
const fakeResponse = getMockResponseStreaming(
127+
'vertexAI',
121128
'streaming-success-function-call-short.txt'
122129
);
123130
const result = processStream(fakeResponse as Response);
@@ -141,6 +148,7 @@ describe('processStream', () => {
141148
});
142149
it('candidate had finishReason', async () => {
143150
const fakeResponse = getMockResponseStreaming(
151+
'vertexAI',
144152
'streaming-failure-finish-reason-safety.txt'
145153
);
146154
const result = processStream(fakeResponse as Response);
@@ -153,6 +161,7 @@ describe('processStream', () => {
153161
});
154162
it('prompt was blocked', async () => {
155163
const fakeResponse = getMockResponseStreaming(
164+
'vertexAI',
156165
'streaming-failure-prompt-blocked-safety.txt'
157166
);
158167
const result = processStream(fakeResponse as Response);
@@ -165,6 +174,7 @@ describe('processStream', () => {
165174
});
166175
it('empty content', async () => {
167176
const fakeResponse = getMockResponseStreaming(
177+
'vertexAI',
168178
'streaming-failure-empty-content.txt'
169179
);
170180
const result = processStream(fakeResponse as Response);
@@ -176,6 +186,7 @@ describe('processStream', () => {
176186
});
177187
it('unknown enum - should ignore', async () => {
178188
const fakeResponse = getMockResponseStreaming(
189+
'vertexAI',
179190
'streaming-success-unknown-safety-enum.txt'
180191
);
181192
const result = processStream(fakeResponse as Response);
@@ -187,6 +198,7 @@ describe('processStream', () => {
187198
});
188199
it('recitation ending with a missing content field', async () => {
189200
const fakeResponse = getMockResponseStreaming(
201+
'vertexAI',
190202
'streaming-failure-recitation-no-content.txt'
191203
);
192204
const result = processStream(fakeResponse as Response);
@@ -205,6 +217,7 @@ describe('processStream', () => {
205217
});
206218
it('handles citations', async () => {
207219
const fakeResponse = getMockResponseStreaming(
220+
'vertexAI',
208221
'streaming-success-citations.txt'
209222
);
210223
const result = processStream(fakeResponse as Response);
@@ -224,6 +237,7 @@ describe('processStream', () => {
224237
});
225238
it('removes empty text parts', async () => {
226239
const fakeResponse = getMockResponseStreaming(
240+
'vertexAI',
227241
'streaming-success-empty-text-part.txt'
228242
);
229243
const result = processStream(fakeResponse as Response);

0 commit comments

Comments
 (0)