Skip to content

Commit 5dc4587

Browse files
committed
test: add tests for deep merge without mutation and gemini model aliasing
- Add test for deep merging nested dicts without mutating provider config - Add test for reasoning effort preservation in aliased gemini-3-latest models
1 parent f964aa3 commit 5dc4587

2 files changed

Lines changed: 73 additions & 2 deletions

File tree

core

Submodule core updated 1 file

test/test_post_body_parameter_overrides.py

Lines changed: 72 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
66

77
from core.models import RequestModel
8-
from core.request import get_payload
8+
from core.request import apply_post_body_parameter_overrides, get_payload
99

1010

1111
def test_gpt_responses_generic_post_body_overrides_apply():
@@ -136,6 +136,40 @@ def test_post_body_parameter_overrides_can_remove_model_specific_fields():
136136
assert "response_format" not in payload
137137

138138

139+
def test_post_body_parameter_overrides_deep_merge_nested_dicts_without_mutating_provider_config():
    """Overrides must deep-merge into nested payload dicts, keeping existing
    payload keys, without the merge writing payload keys back into the
    provider's own override configuration."""
    request_body = {
        "generationConfig": {
            "temperature": 1,
            "thinkingConfig": {
                "thinkingLevel": "minimal",
            },
        }
    }
    override_config = {
        "generationConfig": {
            "thinkingConfig": {
                "includeThoughts": True,
            }
        }
    }
    provider_config = {
        "model": ["gemini-3-flash"],
        "preferences": {
            "post_body_parameter_overrides": {"gemini-3-flash": override_config},
        },
    }

    apply_post_body_parameter_overrides(request_body, provider_config, "gemini-3-flash")

    generation = request_body["generationConfig"]
    thinking = generation["thinkingConfig"]
    # Existing payload values survive the merge; the override key is added.
    assert generation["temperature"] == 1
    assert thinking["includeThoughts"] is True
    assert thinking["thinkingLevel"] == "minimal"
    # The provider-side override dict must not have absorbed payload keys
    # (same object as provider_config["preferences"][...]["gemini-3-flash"]).
    assert override_config["generationConfig"]["thinkingConfig"] == {
        "includeThoughts": True,
    }
171+
172+
139173
def test_gemini_reasoning_effort_overrides_post_body_thinking_level():
140174
request = RequestModel(
141175
model="gemini-3-flash",
@@ -170,6 +204,43 @@ def test_gemini_reasoning_effort_overrides_post_body_thinking_level():
170204
assert payload["generationConfig"]["thinkingConfig"]["thinkingLevel"] == "minimal"
171205

172206

207+
def test_gemini_reasoning_effort_is_preserved_for_aliased_gemini_3_latest_models():
    """With the provider aliasing gemini-flash-latest -> gemini-3-flash, the
    request's reasoning effort must survive payload construction alongside the
    deep-merged overrides, and the provider's override config must stay
    unmodified."""
    req = RequestModel(
        model="gemini-3-flash",
        messages=[{"role": "user", "content": "hello"}],
        reasoning={"effort": "minimal"},
        temperature=1,
        stream=False,
    )
    overrides = {
        "generationConfig": {
            "thinkingConfig": {
                "includeThoughts": True,
            }
        }
    }
    provider = {
        "provider": "gemini",
        "base_url": "https://generativelanguage.googleapis.com/v1beta",
        "api": "test-key",
        # Alias entry: requests for gemini-flash-latest map to gemini-3-flash.
        "model": [{"gemini-flash-latest": "gemini-3-flash"}],
        "preferences": {
            "post_body_parameter_overrides": {"gemini-3-flash": overrides},
        },
    }

    _, _, payload = asyncio.run(get_payload(req, "gemini", provider, api_key="test-key"))

    generation = payload["generationConfig"]
    assert generation["temperature"] == 1
    assert generation["maxOutputTokens"] == 8192
    thinking = generation["thinkingConfig"]
    # Override merged in, reasoning effort still mapped to thinkingLevel.
    assert thinking["includeThoughts"] is True
    assert thinking["thinkingLevel"] == "minimal"
    # Provider's own override dict must not have been deep-merged into.
    assert overrides["generationConfig"]["thinkingConfig"] == {
        "includeThoughts": True,
    }
242+
243+
173244
def test_vertex_gemini_reasoning_effort_overrides_post_body_thinking_level():
174245
request = RequestModel(
175246
model="gemini-3-flash",

0 commit comments

Comments
 (0)