
sidebar.wechat

sidebar.feishu
sidebar.chooseYourWayToJoin

sidebar.scanToAddConsultant
When building LLM applications, API format differences between model providers are a common pain point. While both OpenAI and Anthropic support tool calling and streaming responses, their message formats are vastly different. AskTable's ChatMessageBuilder provides an elegant solution: a unified internal message format + bidirectional converters.
# Example: OpenAI Chat Completions message format — plain-string content,
# tool calls on a dedicated "tool_calls" field, and tool results delivered
# as separate "tool"-role messages.
messages = [
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "What's the weather?"},
    {
        "role": "assistant",
        "content": "Let me check",
        "tool_calls": [{
            "id": "call_123",
            "type": "function",
            # "arguments" is a JSON-encoded string, per the OpenAI API
            "function": {"name": "get_weather", "arguments": '{"city": "Beijing"}'}
        }]
    },
    {"role": "tool", "tool_call_id": "call_123", "content": "Sunny, 25°C"}
]
# Example: Anthropic Messages format — every message body is a list of
# Content Blocks; tool results ride on a user-role message.
# NOTE(review): the native Anthropic API expects "input" to be a JSON
# object (dict), not a JSON-encoded string as shown here — confirm whether
# this example intends the library's internal string representation.
messages = [
    {
        "role": "user",
        "content": [{"type": "text", "text": "What's the weather?"}]
    },
    {
        "role": "assistant",
        "content": [
            {"type": "text", "text": "Let me check"},
            {"type": "tool_use", "id": "call_123", "name": "get_weather", "input": '{"city": "Beijing"}'}
        ]
    },
    {
        "role": "user",
        "content": [{"type": "tool_result", "tool_use_id": "call_123", "content": "Sunny, 25°C"}]
    }
]
| Feature | OpenAI | Anthropic |
|---|---|---|
| System Prompt | Separate system message | Passed as API parameter |
| Content Format | String | Content Block array |
| Tool Calls | tool_calls field | tool_use Content Block |
| Tool Results | Separate tool role message | tool_result Content Block |
| Thinking | No native field (some models expose reasoning deltas) | Native thinking Block support |
ChatMessageBuilder uses an Anthropic-like Content Block format as its internal representation:
# Internal message format (schematic, not runnable Python: the "|" marks
# alternative role values, and the content list enumerates the possible
# block shapes rather than an actual message).
InternalMessage = {
    "role": "assistant" | "user",
    "content": [
        {"type": "text", "text": "..."},
        {"type": "thinking", "thinking": "..."},
        {"type": "tool_use", "id": "...", "name": "...", "input": "..."},
        {"type": "tool_result", "tool_use_id": "...", "content": "..."}
    ]
}
def append_openai_message(self, message: "ChatCompletionMessageParam") -> None:
    """Convert one OpenAI-format message into the internal Content Block format.

    Handles three roles:
      - "user": content wrapped in a single text block
      - "assistant": string content plus any tool_calls become text/tool_use blocks
      - "tool": becomes a tool_result block on a user message (Anthropic convention)
    Messages with any other role (e.g. "system") are ignored here.
    """
    role = message["role"]
    if role == "user":
        # User message: normalize content to a single text block
        self._messages.append({
            "role": "user",
            "content": [{"type": "text", "text": str(message["content"])}],
        })
    elif role == "assistant":
        # Assistant message
        blocks: "list[ContentBlock]" = []
        # Add text content (only when it is a non-empty string)
        content = message.get("content")
        if isinstance(content, str) and content:
            blocks.append({"type": "text", "text": content})
        # Add tool calls; "arguments" stays a JSON-encoded string internally
        tool_calls = message.get("tool_calls")
        if tool_calls:
            for tc in tool_calls:
                blocks.append({
                    "type": "tool_use",
                    "id": tc["id"],
                    "name": tc["function"]["name"],
                    "input": tc["function"]["arguments"],
                })
        # Empty assistant messages are dropped entirely
        if blocks:
            self._messages.append({"role": "assistant", "content": blocks})
    elif role == "tool":
        # Tool result message: folded into a user message, matching the
        # Anthropic convention that tool results travel on the user turn.
        tool_call_id = message.get("tool_call_id")
        content = message.get("content", "")
        if tool_call_id:
            # BUGFIX: guard against an empty history — the original indexed
            # self._messages[-1] unconditionally and raised IndexError when
            # a tool message arrived first.
            if self._messages and self._messages[-1]["role"] == "user":
                # If the last message is a user message, append to it
                self._messages[-1]["content"].append({
                    "type": "tool_result",
                    "tool_use_id": tool_call_id,
                    "content": str(content),
                })
            else:
                # Otherwise, create a new user message
                self._messages.append({
                    "role": "user",
                    "content": [{
                        "type": "tool_result",
                        "tool_use_id": tool_call_id,
                        "content": str(content),
                    }],
                })
Streaming responses require incremental message building:
def append_openai_delta(self, chunk: "ChatCompletionChunk") -> "StreamEvent | list[StreamEvent] | None":
    """Fold one OpenAI streaming chunk into the message being built.

    Returns a stream event to forward to the client, a list of events
    (one per completed tool call, on finish), or None when the chunk
    carries nothing to surface yet.

    NOTE(review): the original annotation said ``StreamEvent | None``, but
    the finish_reason branch can return a list of events — callers should
    handle both shapes.
    """
    if not chunk.choices:
        return None
    choice = chunk.choices[0]
    delta = choice.delta
    # Ensure assistant message exists
    if not self._messages or self._messages[-1]["role"] != "assistant":
        self._messages.append({"role": "assistant", "content": []})
    blocks = self._messages[-1]["content"]
    # Process text content: consecutive text deltas coalesce into the
    # trailing text block instead of producing one block per delta.
    if delta.content:
        if blocks and blocks[-1]["type"] == "text":
            # Append to existing text block
            blocks[-1]["text"] += delta.content
        else:
            # Create new text block
            blocks.append({"type": "text", "text": delta.content})
        return AssistantStreamEvent(
            role="assistant",
            content=TextDelta(type="text", text=delta.content)
        )
    # Process thinking/reasoning — providers expose reasoning text under
    # different attribute names; the first match below wins.
    reasoning_text = None
    if hasattr(delta, "reasoning_details") and delta.reasoning_details:
        # NOTE(review): only the first reasoning_details entry is read —
        # confirm providers never send more than one per chunk.
        reasoning_text = delta.reasoning_details[0].get("text", "")
    elif hasattr(delta, "reasoning") and delta.reasoning is not None:
        reasoning_text = delta.reasoning
    elif hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
        reasoning_text = delta.reasoning_content
    if reasoning_text:
        if blocks and blocks[-1]["type"] == "thinking":
            blocks[-1]["thinking"] += reasoning_text
        else:
            blocks.append({"type": "thinking", "thinking": reasoning_text})
        return AssistantStreamEvent(
            role="assistant",
            content=ThinkingDelta(type="thinking", thinking=reasoning_text)
        )
    # Process tool calls: ids/names/argument fragments arrive incrementally,
    # keyed by index, and accumulate on the matching tool_use block.
    if delta.tool_calls:
        for tc_delta in delta.tool_calls:
            idx = tc_delta.index if tc_delta.index is not None else 0
            tool_use_block = self._get_or_create_tool_use_block(blocks, idx)
            if tc_delta.id:
                tool_use_block["id"] = tc_delta.id
            if tc_delta.function:
                if tc_delta.function.name:
                    tool_use_block["name"] = tc_delta.function.name
                if tc_delta.function.arguments:
                    # Argument JSON is streamed in pieces; concatenate them
                    tool_use_block["input"] += tc_delta.function.arguments
        return None  # Events sent only after tool calls are complete
    # Process finish_reason - send tool call events
    if choice.finish_reason:
        tool_use_blocks = [b for b in blocks if b["type"] == "tool_use"]
        if tool_use_blocks:
            events = [
                AssistantStreamEvent(
                    role="assistant",
                    content=ToolUse(
                        type="tool_use",
                        id=b["id"],
                        name=b["name"],
                        input=b["input"],
                    ),
                )
                for b in tool_use_blocks
            ]
            # A single event is returned bare for caller convenience.
            return events if len(events) > 1 else events[0]
    return None
def dump_openai(self, cache_control: bool = False) -> "list[ChatCompletionMessageParam]":
    """Export the conversation in OpenAI Chat Completions format.

    The system prompt becomes a leading system message. For each internal
    message, text and tool_use blocks merge into a single assistant message
    (tool_use blocks become the "tool_calls" list), while each tool_result
    block is emitted as a standalone "tool"-role message. Thinking blocks
    are dropped — the OpenAI format has no equivalent field.

    Args:
        cache_control: when True, wrap the final user message's text in a
            content-part list carrying an ephemeral cache_control marker
            (for Anthropic-compatible OpenAI endpoints).
    """
    result = []
    if self.system_prompt is not None:
        result.append({"role": "system", "content": self.system_prompt})

    for msg in self._messages:
        # Bucket this message's blocks by type.
        texts = [b["text"] for b in msg["content"] if b["type"] == "text"]
        calls = [b for b in msg["content"] if b["type"] == "tool_use"]
        results = [b for b in msg["content"] if b["type"] == "tool_result"]

        if msg["role"] == "assistant":
            entry = {"role": "assistant", "content": "".join(texts)}
            if calls:
                entry["tool_calls"] = [
                    {
                        "id": c["id"],
                        "type": "function",
                        "function": {"name": c["name"], "arguments": c["input"]},
                    }
                    for c in calls
                ]
            result.append(entry)
        elif msg["role"] == "user" and texts:
            result.append({"role": "user", "content": "".join(texts)})

        # Tool results become separate tool-role messages in either case.
        for r in results:
            result.append({
                "role": "tool",
                "tool_call_id": r["tool_use_id"],
                "content": str(r["content"]),
            })

    # Optionally tag the trailing user message for prompt caching
    # (Anthropic-compatible OpenAI API extension).
    if cache_control and result:
        tail = result[-1]
        if tail["role"] == "user":
            tail_content = tail.get("content", "")
            if isinstance(tail_content, str):
                tail["content"] = [{
                    "type": "text",
                    "text": tail_content,
                    "cache_control": {"type": "ephemeral"},
                }]
    return result
def dump_anthropic(self) -> "list[InternalMessage]":
    """
    Export to Anthropic format (returns internal format directly)

    NOTE(review): this returns the live internal list, not a copy — callers
    that mutate the result mutate builder state. Confirm that is intended.
    """
    return self._messages
ChatMessageBuilder can track which tool calls have not yet returned results:
def get_unresolved_tool_use_blocks(self) -> "list[ContentBlock]":
    """Find unresolved tool_use blocks in the last assistant message.

    A tool_use block is "resolved" when any user message anywhere in the
    history carries a tool_result with a matching tool_use_id.
    """
    # Walk backwards to the most recent assistant turn.
    for msg in reversed(self._messages):
        if msg["role"] != "assistant":
            continue
        pending = [blk for blk in msg["content"] if blk["type"] == "tool_use"]
        if not pending:
            return []
        # Gather every tool_use_id already answered on a user turn.
        answered = set()
        for m in self._messages:
            if m["role"] != "user":
                continue
            for blk in m["content"]:
                if blk["type"] == "tool_result":
                    answered.add(blk["tool_use_id"])
        return [blk for blk in pending if blk["id"] not in answered]
    return []
def append_tool_result(self, tool_call_id: str, content: str) -> "StreamEvent":
    """Record a tool execution result and return the stream event for it.

    The result travels as a tool_result block on a user message (Anthropic
    convention): it is appended to the trailing user message when there is
    one, otherwise a fresh user message is created.
    """
    # Create tool_result block
    tool_result_block = {
        "type": "tool_result",
        "tool_use_id": tool_call_id,
        "content": content,
    }
    # BUGFIX: check the history is non-empty before peeking at the last
    # message — the original indexed self._messages[-1] unconditionally
    # and raised IndexError on an empty builder.
    if self._messages and self._messages[-1]["role"] == "user":
        self._messages[-1]["content"].append(tool_result_block)
    else:
        self._messages.append({"role": "user", "content": [tool_result_block]})
    return StreamUserEvent(
        role="user",
        content=ToolResult(
            type="tool_result",
            tool_use_id=tool_call_id,
            content=content
        ),
    )
# --- Basic usage example (assumes configured `openai` / `anthropic` clients) ---
# Initialize
builder = ChatMessageBuilder(system_prompt="You are a helpful assistant")
# Add user message
builder.append_openai_message({
    "role": "user",
    "content": "What's the weather in Beijing?"
})
# Use OpenAI API
openai_messages = builder.dump_openai()
response = openai.chat.completions.create(
    model="gpt-4",
    messages=openai_messages
)
# Or use Anthropic API — note the system prompt is passed as an API
# parameter, not as a message (Anthropic convention).
anthropic_messages = builder.dump_anthropic()
response = anthropic.messages.create(
    model="claude-3-5-sonnet-20241022",
    system=builder.system_prompt,
    messages=anthropic_messages
)
# --- Streaming example (excerpt: the `yield` below requires an enclosing
# generator function) ---
builder = ChatMessageBuilder()
# Stream OpenAI response
stream = openai.chat.completions.create(
    model="gpt-4",
    messages=messages,
    stream=True
)
for chunk in stream:
    event = builder.append_openai_delta(chunk)
    if event:
        # Send to frontend
        yield event
# --- Tool-call round-trip example ---
builder = ChatMessageBuilder()
# User message
builder.append_openai_message({
    "role": "user",
    "content": "What's the weather?"
})
# LLM response (includes tool call)
builder.append_openai_message({
    "role": "assistant",
    "content": "Let me check",
    "tool_calls": [{
        "id": "call_123",
        "type": "function",
        "function": {"name": "get_weather", "arguments": '{"city": "Beijing"}'}
    }]
})
# Check unresolved tool calls (no tool_result recorded yet)
unresolved = builder.get_unresolved_tool_use_blocks()
print(unresolved)  # [{"type": "tool_use", "id": "call_123", ...}]
# Append tool result
builder.append_tool_result("call_123", "Sunny, 25°C")
# Continue conversation with the updated history
messages = builder.dump_openai()
ChatMessageBuilder supports multiple thinking/reasoning formats:
# Provider-specific streaming fields carrying reasoning text
# (illustrative assignments, not runnable on their own):
# OpenAI o1 format
delta.reasoning_details = [{"text": "Let me think..."}]
# OpenRouter format
delta.reasoning = "Let me think..."
# Qwen format
delta.reasoning_content = "Let me think..."
All formats are converted into a unified thinking block:
{"type": "thinking", "thinking": "Let me think..."}
Incrementally build messages during streaming to avoid redundant parsing:
# Incrementally append text (excerpt from append_openai_delta): extend the
# trailing text block so consecutive deltas coalesce into one block
if blocks and blocks[-1]["type"] == "text":
    blocks[-1]["text"] += delta.content
Only export to a specific format when needed:
# Internal format remains unchanged
builder._messages  # Always in unified format
# Export on demand — conversion cost is paid only at dump time
openai_messages = builder.dump_openai()  # Only converted when called
For identical message histories, export results can be cached:
def dump_openai_cached(self, messages_hash: str):
    """Return dump_openai() output, cached per message-history hash.

    Uses a per-instance dict rather than functools.lru_cache: decorating a
    bound method with lru_cache keys the cache on ``self`` and keeps every
    builder instance alive for the cache's lifetime (ruff B019).
    """
    cache = getattr(self, "_dump_cache", None)
    if cache is None:
        # Lazily create the per-instance cache on first use
        cache = self._dump_cache = {}
    if messages_hash not in cache:
        cache[messages_hash] = self.dump_openai()
    return cache[messages_hash]
ChatMessageBuilder elegantly solves multi-model API compatibility through a unified internal format and bidirectional converters:
This design not only simplifies multi-model integration but also provides an extensible foundation for supporting more models in the future.
sidebar.noProgrammingNeeded
sidebar.startFreeTrial