Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions litellm/llms/dashscope/chat/transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,27 @@

from typing import Any, Coroutine, List, Literal, Optional, Tuple, Union, overload

from litellm.types.llms.openai import ChatCompletionToolParam

from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import AllMessageValues
Comment on lines +7 to 10
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 Duplicate import from the same module

ChatCompletionToolParam and AllMessageValues are both imported from litellm.types.llms.openai in separate statements. Per the project's style guide, these should be merged into a single import.

Suggested change
from litellm.types.llms.openai import ChatCompletionToolParam
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import AllMessageValues
from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolParam
from litellm.secret_managers.main import get_secret_str

Context Used: CLAUDE.md (source)

Note: If this suggestion doesn't match your team's coding style, reply to this and let me know. I'll remember it for next time!


from ...openai.chat.gpt_transformation import OpenAIGPTConfig


class DashScopeChatConfig(OpenAIGPTConfig):
def remove_cache_control_flag_from_messages_and_tools(
    self,
    model: str,
    messages: List[AllMessageValues],
    tools: Optional[List[ChatCompletionToolParam]] = None,
) -> Tuple[List[AllMessageValues], Optional[List[ChatCompletionToolParam]]]:
    """
    No-op override of the base-class hook.

    DashScope understands ``cache_control`` markers, so unlike the parent
    implementation we return the messages and tool definitions exactly as
    received, with nothing stripped.
    """
    return (messages, tools)

@overload
def _transform_messages(
self, messages: List[AllMessageValues], model: str, is_async: Literal[True]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -144,3 +144,47 @@ def test_dashscope_no_longer_transforms_content_list(self):
assert transformed_messages[0]["content"][0]["text"] == "Hello"
assert transformed_messages[0]["content"][1]["type"] == "text"
assert transformed_messages[0]["content"][1]["text"] == "World"

def test_dashscope_preserves_cache_control_in_messages(self):
    """The cache_control marker on a message must survive DashScope's transform."""
    cfg = DashScopeChatConfig()

    input_messages = [
        {
            "role": "system",
            "content": "You are a helpful assistant.",
            "cache_control": {"type": "ephemeral"},
        },
        {"role": "user", "content": "Hello, world!"},
    ]

    out_messages, _ = cfg.remove_cache_control_flag_from_messages_and_tools(
        model="dashscope/qwen-turbo", messages=input_messages
    )

    # The flag must still be present on the system message after the hook runs.
    assert out_messages[0].get("cache_control") == {"type": "ephemeral"}

def test_dashscope_preserves_cache_control_in_tools(self):
    """The cache_control marker on a tool definition must survive DashScope's transform."""
    cfg = DashScopeChatConfig()

    tool_defs = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get weather information",
                "parameters": {"type": "object", "properties": {}},
            },
            "cache_control": {"type": "ephemeral"},
        }
    ]

    _, out_tools = cfg.remove_cache_control_flag_from_messages_and_tools(
        model="dashscope/qwen-turbo", messages=[], tools=tool_defs
    )

    # The flag must still be present on the tool after the hook runs.
    assert out_tools[0].get("cache_control") == {"type": "ephemeral"}
Loading