Adding all project files

Martina Burlando 2025-08-02 02:00:33 +02:00
parent 6c9e127bdc
commit cd4316ad0f
42289 changed files with 8009643 additions and 0 deletions

@@ -0,0 +1,88 @@
import asyncio
import sys
from functools import partial
import typer
def _patch_anyio_open_process():
"""
Patch anyio.open_process to allow detached processes on Windows and Unix-like systems.
This is necessary to prevent the MCP client from being interrupted by Ctrl+C when running in the CLI.
"""
import subprocess
import anyio
if getattr(anyio, "_tiny_agents_patched", False):
return
anyio._tiny_agents_patched = True
original_open_process = anyio.open_process
if sys.platform == "win32":
# On Windows, we need to set the creation flags to create a new process group
async def open_process_in_new_group(*args, **kwargs):
"""
Wrapper for open_process to handle Windows-specific process creation flags.
"""
# Ensure we pass the creation flags for Windows
kwargs.setdefault("creationflags", subprocess.CREATE_NEW_PROCESS_GROUP)
return await original_open_process(*args, **kwargs)
anyio.open_process = open_process_in_new_group
else:
# For Unix-like systems, we can use setsid to create a new session
async def open_process_in_new_group(*args, **kwargs):
"""
Wrapper for open_process to handle Unix-like systems with start_new_session=True.
"""
kwargs.setdefault("start_new_session", True)
return await original_open_process(*args, **kwargs)
anyio.open_process = open_process_in_new_group
async def _async_prompt(exit_event: asyncio.Event, prompt: str = "» ") -> str:
"""
Asynchronous prompt function that reads input from stdin without blocking.
This function is designed to work in an asynchronous context, allowing the event loop to gracefully stop it (e.g. on Ctrl+C).
Alternatively, we could use https://github.com/vxgmichel/aioconsole but that would be an additional dependency.
"""
loop = asyncio.get_event_loop()
if sys.platform == "win32":
# Windows: Use run_in_executor to avoid blocking the event loop
# Degraded solution: not ideal, as the user will have to press Ctrl+C once more to stop the prompt (and it will not be graceful)
return await loop.run_in_executor(None, partial(typer.prompt, prompt, prompt_suffix=" "))
else:
# UNIX-like: Use loop.add_reader for non-blocking stdin read
future = loop.create_future()
def on_input():
line = sys.stdin.readline()
loop.remove_reader(sys.stdin)
future.set_result(line)
print(prompt, end=" ", flush=True)
loop.add_reader(sys.stdin, on_input) # not supported on Windows
# Wait for user input or exit event
# Wait until either the user hits enter or exit_event is set
exit_task = asyncio.create_task(exit_event.wait())
await asyncio.wait(
[future, exit_task],
return_when=asyncio.FIRST_COMPLETED,
)
# Check which one has been triggered
if exit_event.is_set():
future.cancel()
return ""
line = await future
return line.strip()
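
For context, a minimal usage sketch of the prompt helper above. The standalone import path and the event-loop wiring are assumptions (inside the package, the CLI module below imports it as `from ._cli_hacks import _async_prompt`):

import asyncio

from _cli_hacks import _async_prompt  # import path assumed

async def main() -> None:
    exit_event = asyncio.Event()
    # In the real CLI a SIGINT handler sets exit_event; here we simply read one line.
    line = await _async_prompt(exit_event)
    print(f"You typed: {line!r}")

asyncio.run(main())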

@@ -0,0 +1,103 @@
from __future__ import annotations
import asyncio
from typing import AsyncGenerator, Dict, Iterable, List, Optional, Union
from huggingface_hub import ChatCompletionInputMessage, ChatCompletionStreamOutput, MCPClient
from .._providers import PROVIDER_OR_POLICY_T
from .constants import DEFAULT_SYSTEM_PROMPT, EXIT_LOOP_TOOLS, MAX_NUM_TURNS
from .types import ServerConfig
class Agent(MCPClient):
"""
Implementation of a simple Agent: a while loop built directly on top of an [`MCPClient`].
<Tip warning={true}>
This class is experimental and might be subject to breaking changes in the future without prior notice.
</Tip>
Args:
model (`str`, *optional*):
The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
or a URL to a deployed Inference Endpoint or other local or remote endpoint.
servers (`Iterable[ServerConfig]`):
MCP servers to connect to. Each server is a dictionary with a `type` key (`"stdio"`, `"http"` or `"sse"`); the remaining keys are the parameters passed to [`MCPClient.add_mcp_server`] for that server type.
provider (`str`, *optional*):
Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
If model is a URL or `base_url` is passed, then `provider` is not used.
base_url (`str`, *optional*):
The base URL to run inference. Defaults to None.
api_key (`str`, *optional*):
Token to use for authentication. Defaults to the locally saved Hugging Face token if not provided. You can also use your own provider API key to interact directly with the provider's service.
prompt (`str`, *optional*):
The system prompt to use for the agent. Defaults to the default system prompt in `constants.py`.
"""
def __init__(
self,
*,
model: Optional[str] = None,
servers: Iterable[ServerConfig],
provider: Optional[PROVIDER_OR_POLICY_T] = None,
base_url: Optional[str] = None,
api_key: Optional[str] = None,
prompt: Optional[str] = None,
):
super().__init__(model=model, provider=provider, base_url=base_url, api_key=api_key)
self._servers_cfg = list(servers)
self.messages: List[Union[Dict, ChatCompletionInputMessage]] = [
{"role": "system", "content": prompt or DEFAULT_SYSTEM_PROMPT}
]
async def load_tools(self) -> None:
for cfg in self._servers_cfg:
await self.add_mcp_server(**cfg)
async def run(
self,
user_input: str,
*,
abort_event: Optional[asyncio.Event] = None,
) -> AsyncGenerator[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage], None]:
"""
Run the agent with the given user input.
Args:
user_input (`str`):
The user input to run the agent with.
abort_event (`asyncio.Event`, *optional*):
An event that can be used to abort the agent. If the event is set, the agent will stop running.
"""
self.messages.append({"role": "user", "content": user_input})
num_turns: int = 0
next_turn_should_call_tools = True
while True:
if abort_event and abort_event.is_set():
return
async for item in self.process_single_turn_with_tools(
self.messages,
exit_loop_tools=EXIT_LOOP_TOOLS,
exit_if_first_chunk_no_tool=(num_turns > 0 and next_turn_should_call_tools),
):
yield item
num_turns += 1
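# Decide whether to stop: an exit-loop tool was just called, the turn budget
# (MAX_NUM_TURNS) is exhausted, or the model answered in plain text on a turn
# where a tool call was expected.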
last = self.messages[-1]
if last.get("role") == "tool" and last.get("name") in {t.function.name for t in EXIT_LOOP_TOOLS}:
return
if last.get("role") != "tool" and num_turns > MAX_NUM_TURNS:
return
if last.get("role") != "tool" and next_turn_should_call_tools:
return
next_turn_should_call_tools = last.get("role") != "tool"
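
A hedged usage sketch of the `Agent` class above; the import path, model id and server definition are illustrative and not taken from this commit:

import asyncio

from huggingface_hub.inference._mcp.agent import Agent  # import path is an assumption

async def main() -> None:
    servers = [
        {"type": "stdio", "command": "npx", "args": ["@playwright/mcp@latest"]},  # example server
    ]
    async with Agent(model="Qwen/Qwen2.5-72B-Instruct", servers=servers) as agent:
        await agent.load_tools()
        async for item in agent.run("Open https://example.com and summarize the page"):
            # Streaming chunks expose `.choices`; tool results are message objects.
            if hasattr(item, "choices"):
                delta = item.choices[0].delta
                if delta.content:
                    print(delta.content, end="", flush=True)
            else:
                print(f"\n[tool:{item.name}] {item.content}")

asyncio.run(main())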

@@ -0,0 +1,247 @@
import asyncio
import os
import signal
import traceback
from typing import Optional
import typer
from rich import print
from ._cli_hacks import _async_prompt, _patch_anyio_open_process
from .agent import Agent
from .utils import _load_agent_config
app = typer.Typer(
rich_markup_mode="rich",
help="A squad of lightweight composable AI applications built on Hugging Face's Inference Client and MCP stack.",
)
run_cli = typer.Typer(
name="run",
help="Run the Agent in the CLI",
invoke_without_command=True,
)
app.add_typer(run_cli, name="run")
async def run_agent(
agent_path: Optional[str],
) -> None:
"""
Tiny Agent loop.
Args:
agent_path (`str`, *optional*):
Path to a local folder containing an `agent.json` file (and optionally a custom `PROMPT.md`), or the name of a built-in agent stored in a Hugging Face dataset.
"""
_patch_anyio_open_process() # Hacky way to prevent stdio connections from being stopped by Ctrl+C
config, prompt = _load_agent_config(agent_path)
inputs = config.get("inputs", [])
servers = config.get("servers", [])
abort_event = asyncio.Event()
exit_event = asyncio.Event()
first_sigint = True
loop = asyncio.get_running_loop()
original_sigint_handler = signal.getsignal(signal.SIGINT)
def _sigint_handler() -> None:
nonlocal first_sigint
if first_sigint:
first_sigint = False
abort_event.set()
print("\n[red]Interrupted. Press Ctrl+C again to quit.[/red]", flush=True)
return
print("\n[red]Exiting...[/red]", flush=True)
exit_event.set()
try:
sigint_registered_in_loop = False
try:
loop.add_signal_handler(signal.SIGINT, _sigint_handler)
sigint_registered_in_loop = True
except (AttributeError, NotImplementedError):
# Windows (or any loop that doesn't support add_signal_handler): fall back to a synchronous handler
signal.signal(signal.SIGINT, lambda *_: _sigint_handler())
# Handle inputs (i.e. env variables injection)
resolved_inputs: dict[str, str] = {}
if len(inputs) > 0:
print(
"[bold blue]Some initial inputs are required by the agent. "
"Please provide a value or leave empty to load from env.[/bold blue]"
)
for input_item in inputs:
input_id = input_item["id"]
description = input_item["description"]
env_special_value = f"${{input:{input_id}}}"
# Check if the input is used by any server or as an apiKey
input_usages = set()
for server in servers:
# Check stdio's "env" and http/sse's "headers" mappings
env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
for key, value in env_or_headers.items():
if env_special_value in value:
input_usages.add(key)
raw_api_key = config.get("apiKey")
if isinstance(raw_api_key, str) and env_special_value in raw_api_key:
input_usages.add("apiKey")
if not input_usages:
print(
f"[yellow]Input '{input_id}' defined in config but not used by any server or as an API key."
" Skipping.[/yellow]"
)
continue
# Prompt user for input
env_variable_key = input_id.replace("-", "_").upper()
print(
f"[blue] • {input_id}[/blue]: {description}. (default: load from {env_variable_key}).",
end=" ",
)
user_input = (await _async_prompt(exit_event=exit_event)).strip()
if exit_event.is_set():
return
# Fallback to environment variable when user left blank
final_value = user_input
if not final_value:
final_value = os.getenv(env_variable_key, "")
if final_value:
print(f"[green]Value successfully loaded from '{env_variable_key}'[/green]")
else:
print(
f"[yellow]No value found for '{env_variable_key}' in environment variables. Continuing.[/yellow]"
)
resolved_inputs[input_id] = final_value
# Inject resolved value (can be empty) into stdio's env or http/sse's headers
for server in servers:
env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
for key, value in env_or_headers.items():
if env_special_value in value:
env_or_headers[key] = env_or_headers[key].replace(env_special_value, final_value)
print()
raw_api_key = config.get("apiKey")
if isinstance(raw_api_key, str):
substituted_api_key = raw_api_key
for input_id, val in resolved_inputs.items():
substituted_api_key = substituted_api_key.replace(f"${{input:{input_id}}}", val)
config["apiKey"] = substituted_api_key
# Main agent loop
async with Agent(
provider=config.get("provider"), # type: ignore[arg-type]
model=config.get("model"),
base_url=config.get("endpointUrl"), # type: ignore[arg-type]
api_key=config.get("apiKey"),
servers=servers, # type: ignore[arg-type]
prompt=prompt,
) as agent:
await agent.load_tools()
print(f"[bold blue]Agent loaded with {len(agent.available_tools)} tools:[/bold blue]")
for t in agent.available_tools:
print(f"[blue] • {t.function.name}[/blue]")
while True:
abort_event.clear()
# Check if we should exit
if exit_event.is_set():
return
try:
user_input = await _async_prompt(exit_event=exit_event)
first_sigint = True
except EOFError:
print("\n[red]EOF received, exiting.[/red]", flush=True)
break
except KeyboardInterrupt:
if not first_sigint and abort_event.is_set():
continue
else:
print("\n[red]Keyboard interrupt during input processing.[/red]", flush=True)
break
try:
async for chunk in agent.run(user_input, abort_event=abort_event):
if abort_event.is_set() and not first_sigint:
break
if exit_event.is_set():
return
if hasattr(chunk, "choices"):
delta = chunk.choices[0].delta
if delta.content:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for call in delta.tool_calls:
if call.id:
print(f"<Tool {call.id}>", end="")
if call.function.name:
print(f"{call.function.name}", end=" ")
if call.function.arguments:
print(f"{call.function.arguments}", end="")
else:
print(
f"\n\n[green]Tool[{chunk.name}] {chunk.tool_call_id}\n{chunk.content}[/green]\n",
flush=True,
)
print()
except Exception as e:
tb_str = traceback.format_exc()
print(f"\n[bold red]Error during agent run: {e}\n{tb_str}[/bold red]", flush=True)
first_sigint = True # Allow graceful interrupt for the next command
except Exception as e:
tb_str = traceback.format_exc()
print(f"\n[bold red]An unexpected error occurred: {e}\n{tb_str}[/bold red]", flush=True)
raise e
finally:
if sigint_registered_in_loop:
try:
loop.remove_signal_handler(signal.SIGINT)
except (AttributeError, NotImplementedError):
pass
else:
signal.signal(signal.SIGINT, original_sigint_handler)
@run_cli.callback()
def run(
path: Optional[str] = typer.Argument(
None,
help=(
"Path to a local folder containing an agent.json file or a built-in agent "
"stored in the 'tiny-agents/tiny-agents' Hugging Face dataset "
"(https://huggingface.co/datasets/tiny-agents/tiny-agents)"
),
show_default=False,
),
):
try:
asyncio.run(run_agent(path))
except KeyboardInterrupt:
print("\n[red]Application terminated by KeyboardInterrupt.[/red]", flush=True)
raise typer.Exit(code=130)
except Exception as e:
print(f"\n[bold red]An unexpected error occurred: {e}[/bold red]", flush=True)
raise e
if __name__ == "__main__":
app()
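
For reference, a hedged sketch of what an `agent.json` with an `inputs` entry could look like; the input id, model and URL are illustrative. The loop above replaces each `${input:hf-token}` placeholder in a server's `env`/`headers` (or in `apiKey`) with the value typed by the user, falling back to the `HF_TOKEN` environment variable (the input id upper-cased, dashes replaced by underscores) when the prompt is left empty:

{
  "model": "Qwen/Qwen2.5-72B-Instruct",
  "provider": "nebius",
  "inputs": [
    {"id": "hf-token", "description": "Token used to authenticate against the MCP server", "password": true}
  ],
  "servers": [
    {
      "type": "http",
      "url": "https://example.com/mcp",
      "headers": {"Authorization": "Bearer ${input:hf-token}"}
    }
  ]
}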

@@ -0,0 +1,82 @@
from __future__ import annotations
import sys
from pathlib import Path
from typing import List
from huggingface_hub import ChatCompletionInputTool
FILENAME_CONFIG = "agent.json"
FILENAME_PROMPT = "PROMPT.md"
DEFAULT_AGENT = {
"model": "Qwen/Qwen2.5-72B-Instruct",
"provider": "nebius",
"servers": [
{
"type": "stdio",
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
str(Path.home() / ("Desktop" if sys.platform == "darwin" else "")),
],
},
{
"type": "stdio",
"command": "npx",
"args": ["@playwright/mcp@latest"],
},
],
}
DEFAULT_SYSTEM_PROMPT = """
You are an agent - please keep going until the user's query is completely
resolved, before ending your turn and yielding back to the user. Only terminate
your turn when you are sure that the problem is solved, or if you need more
info from the user to solve the problem.
If you are not sure about anything pertaining to the user's request, use your
tools to read files and gather the relevant information: do NOT guess or make
up an answer.
You MUST plan extensively before each function call, and reflect extensively
on the outcomes of the previous function calls. DO NOT do this entire process
by making function calls only, as this can impair your ability to solve the
problem and think insightfully.
""".strip()
MAX_NUM_TURNS = 10
TASK_COMPLETE_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj( # type: ignore[assignment]
{
"type": "function",
"function": {
"name": "task_complete",
"description": "Call this tool when the task given by the user is complete",
"parameters": {
"type": "object",
"properties": {},
},
},
}
)
ASK_QUESTION_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj( # type: ignore[assignment]
{
"type": "function",
"function": {
"name": "ask_question",
"description": "Ask the user for more info required to solve or clarify their problem.",
"parameters": {
"type": "object",
"properties": {},
},
},
}
)
EXIT_LOOP_TOOLS: List[ChatCompletionInputTool] = [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL]
DEFAULT_REPO_ID = "tiny-agents/tiny-agents"
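
Additional exit-loop tools could be declared with the same `parse_obj` pattern and appended to `EXIT_LOOP_TOOLS`; a hedged sketch (the tool name and description are illustrative):

HANDOFF_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(  # type: ignore[assignment]
    {
        "type": "function",
        "function": {
            "name": "handoff_to_user",  # hypothetical tool name
            "description": "Call this tool to hand control back to the user",
            "parameters": {"type": "object", "properties": {}},
        },
    }
)
# EXIT_LOOP_TOOLS would then become [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL, HANDOFF_TOOL]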

@@ -0,0 +1,369 @@
import json
import logging
from contextlib import AsyncExitStack
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, AsyncIterable, Dict, List, Literal, Optional, Union, overload
from typing_extensions import NotRequired, TypeAlias, TypedDict, Unpack
from ...utils._runtime import get_hf_hub_version
from .._generated._async_client import AsyncInferenceClient
from .._generated.types import (
ChatCompletionInputMessage,
ChatCompletionInputTool,
ChatCompletionStreamOutput,
ChatCompletionStreamOutputDeltaToolCall,
)
from .._providers import PROVIDER_OR_POLICY_T
from .utils import format_result
if TYPE_CHECKING:
from mcp import ClientSession
logger = logging.getLogger(__name__)
# Type alias for tool names
ToolName: TypeAlias = str
ServerType: TypeAlias = Literal["stdio", "sse", "http"]
class StdioServerParameters_T(TypedDict):
command: str
args: NotRequired[List[str]]
env: NotRequired[Dict[str, str]]
cwd: NotRequired[Union[str, Path, None]]
class SSEServerParameters_T(TypedDict):
url: str
headers: NotRequired[Dict[str, Any]]
timeout: NotRequired[float]
sse_read_timeout: NotRequired[float]
class StreamableHTTPParameters_T(TypedDict):
url: str
headers: NotRequired[dict[str, Any]]
timeout: NotRequired[timedelta]
sse_read_timeout: NotRequired[timedelta]
terminate_on_close: NotRequired[bool]
class MCPClient:
"""
Client for connecting to one or more MCP servers and processing chat completions with tools.
<Tip warning={true}>
This class is experimental and might be subject to breaking changes in the future without prior notice.
</Tip>
Args:
model (`str`, *optional*):
The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
or a URL to a deployed Inference Endpoint or other local or remote endpoint.
provider (`str`, *optional*):
Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
If model is a URL or `base_url` is passed, then `provider` is not used.
base_url (`str`, *optional*):
The base URL to run inference. Defaults to None.
api_key (`str`, *optional*):
Token to use for authentication. Defaults to the locally saved Hugging Face token if not provided. You can also use your own provider API key to interact directly with the provider's service.
"""
def __init__(
self,
*,
model: Optional[str] = None,
provider: Optional[PROVIDER_OR_POLICY_T] = None,
base_url: Optional[str] = None,
api_key: Optional[str] = None,
):
# Initialize MCP sessions as a dictionary of ClientSession objects
self.sessions: Dict[ToolName, "ClientSession"] = {}
self.exit_stack = AsyncExitStack()
self.available_tools: List[ChatCompletionInputTool] = []
# To be able to send the model in the payload if `base_url` is provided
if model is None and base_url is None:
raise ValueError("At least one of `model` or `base_url` should be set in `MCPClient`.")
self.payload_model = model
self.client = AsyncInferenceClient(
model=None if base_url is not None else model,
provider=provider,
api_key=api_key,
base_url=base_url,
)
async def __aenter__(self):
"""Enter the context manager"""
await self.client.__aenter__()
await self.exit_stack.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Exit the context manager"""
await self.client.__aexit__(exc_type, exc_val, exc_tb)
await self.cleanup()
async def cleanup(self):
"""Clean up resources"""
await self.client.close()
await self.exit_stack.aclose()
@overload
async def add_mcp_server(self, type: Literal["stdio"], **params: Unpack[StdioServerParameters_T]): ...
@overload
async def add_mcp_server(self, type: Literal["sse"], **params: Unpack[SSEServerParameters_T]): ...
@overload
async def add_mcp_server(self, type: Literal["http"], **params: Unpack[StreamableHTTPParameters_T]): ...
async def add_mcp_server(self, type: ServerType, **params: Any):
"""Connect to an MCP server
Args:
type (`str`):
Type of the server to connect to. Can be one of:
- "stdio": Standard input/output server (local)
- "sse": Server-sent events (SSE) server
- "http": StreamableHTTP server
**params (`Dict[str, Any]`):
Server parameters that can be either:
- For stdio servers:
- command (str): The command to run the MCP server
- args (List[str], optional): Arguments for the command
- env (Dict[str, str], optional): Environment variables for the command
- cwd (Union[str, Path, None], optional): Working directory for the command
- For SSE servers:
- url (str): The URL of the SSE server
- headers (Dict[str, Any], optional): Headers for the SSE connection
- timeout (float, optional): Connection timeout
- sse_read_timeout (float, optional): SSE read timeout
- For StreamableHTTP servers:
- url (str): The URL of the StreamableHTTP server
- headers (Dict[str, Any], optional): Headers for the StreamableHTTP connection
- timeout (timedelta, optional): Connection timeout
- sse_read_timeout (timedelta, optional): SSE read timeout
- terminate_on_close (bool, optional): Whether to terminate on close
"""
from mcp import ClientSession, StdioServerParameters
from mcp import types as mcp_types
# Determine server type and create appropriate parameters
if type == "stdio":
# Handle stdio server
from mcp.client.stdio import stdio_client
logger.info(f"Connecting to stdio MCP server with command: {params['command']} {params.get('args', [])}")
client_kwargs = {"command": params["command"]}
for key in ["args", "env", "cwd"]:
if params.get(key) is not None:
client_kwargs[key] = params[key]
server_params = StdioServerParameters(**client_kwargs)
read, write = await self.exit_stack.enter_async_context(stdio_client(server_params))
elif type == "sse":
# Handle SSE server
from mcp.client.sse import sse_client
logger.info(f"Connecting to SSE MCP server at: {params['url']}")
client_kwargs = {"url": params["url"]}
for key in ["headers", "timeout", "sse_read_timeout"]:
if params.get(key) is not None:
client_kwargs[key] = params[key]
read, write = await self.exit_stack.enter_async_context(sse_client(**client_kwargs))
elif type == "http":
# Handle StreamableHTTP server
from mcp.client.streamable_http import streamablehttp_client
logger.info(f"Connecting to StreamableHTTP MCP server at: {params['url']}")
client_kwargs = {"url": params["url"]}
for key in ["headers", "timeout", "sse_read_timeout", "terminate_on_close"]:
if params.get(key) is not None:
client_kwargs[key] = params[key]
read, write, _ = await self.exit_stack.enter_async_context(streamablehttp_client(**client_kwargs))
# ^ TODO: should we handle `get_session_id_callback`? (function to retrieve the current session ID)
else:
raise ValueError(f"Unsupported server type: {type}")
session = await self.exit_stack.enter_async_context(
ClientSession(
read_stream=read,
write_stream=write,
client_info=mcp_types.Implementation(
name="huggingface_hub.MCPClient",
version=get_hf_hub_version(),
),
)
)
logger.debug("Initializing session...")
await session.initialize()
# List available tools
response = await session.list_tools()
logger.debug("Connected to server with tools:", [tool.name for tool in response.tools])
for tool in response.tools:
if tool.name in self.sessions:
logger.warning(f"Tool '{tool.name}' already defined by another server. Skipping.")
continue
# Map tool names to their server for later lookup
self.sessions[tool.name] = session
# Add tool to the list of available tools (for use in chat completions)
self.available_tools.append(
ChatCompletionInputTool.parse_obj_as_instance(
{
"type": "function",
"function": {
"name": tool.name,
"description": tool.description,
"parameters": tool.inputSchema,
},
}
)
)
async def process_single_turn_with_tools(
self,
messages: List[Union[Dict, ChatCompletionInputMessage]],
exit_loop_tools: Optional[List[ChatCompletionInputTool]] = None,
exit_if_first_chunk_no_tool: bool = False,
) -> AsyncIterable[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage]]:
"""Process a query using `self.model` and available tools, yielding chunks and tool outputs.
Args:
messages (`List[Dict]`):
List of message objects representing the conversation history
exit_loop_tools (`List[ChatCompletionInputTool]`, *optional*):
List of tools that should exit the generator when called
exit_if_first_chunk_no_tool (`bool`, *optional*):
Exit if no tool call is present in the first chunks. Defaults to False.
Yields:
[`ChatCompletionStreamOutput`] chunks or [`ChatCompletionInputMessage`] objects
"""
# Prepare tools list based on options
tools = self.available_tools
if exit_loop_tools is not None:
tools = [*exit_loop_tools, *self.available_tools]
# Create the streaming request
response = await self.client.chat.completions.create(
model=self.payload_model,
messages=messages,
tools=tools,
tool_choice="auto",
stream=True,
)
message: Dict[str, Any] = {"role": "unknown", "content": ""}
final_tool_calls: Dict[int, ChatCompletionStreamOutputDeltaToolCall] = {}
num_of_chunks = 0
# Read from stream
async for chunk in response:
num_of_chunks += 1
delta = chunk.choices[0].delta if chunk.choices and len(chunk.choices) > 0 else None
if not delta:
continue
# Process message
if delta.role:
message["role"] = delta.role
if delta.content:
message["content"] += delta.content
# Process tool calls
if delta.tool_calls:
for tool_call in delta.tool_calls:
# Aggregate chunks into tool calls
if tool_call.index not in final_tool_calls:
if (
tool_call.function.arguments is None or tool_call.function.arguments == "{}"
): # Corner case (depends on provider)
tool_call.function.arguments = ""
final_tool_calls[tool_call.index] = tool_call
elif tool_call.function.arguments:
final_tool_calls[tool_call.index].function.arguments += tool_call.function.arguments
# Optionally exit early if no tools in first chunks
if exit_if_first_chunk_no_tool and num_of_chunks <= 2 and len(final_tool_calls) == 0:
return
# Yield each chunk to caller
yield chunk
# Add the assistant message with tool calls (if any) to messages
if message["content"] or final_tool_calls:
# if the role is unknown, set it to assistant
if message.get("role") == "unknown":
message["role"] = "assistant"
# Convert final_tool_calls to the format expected by OpenAI
if final_tool_calls:
tool_calls_list: List[Dict[str, Any]] = []
for tc in final_tool_calls.values():
tool_calls_list.append(
{
"id": tc.id,
"type": "function",
"function": {
"name": tc.function.name,
"arguments": tc.function.arguments or "{}",
},
}
)
message["tool_calls"] = tool_calls_list
messages.append(message)
# Process tool calls one by one
for tool_call in final_tool_calls.values():
function_name = tool_call.function.name
try:
function_args = json.loads(tool_call.function.arguments or "{}")
except json.JSONDecodeError as err:
tool_message = {
"role": "tool",
"tool_call_id": tool_call.id,
"name": function_name,
"content": f"Invalid JSON generated by the model: {err}",
}
tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
messages.append(tool_message_as_obj)
yield tool_message_as_obj
continue # move to next tool call
tool_message = {"role": "tool", "tool_call_id": tool_call.id, "content": "", "name": function_name}
# Check if this is an exit loop tool
if exit_loop_tools and function_name in [t.function.name for t in exit_loop_tools]:
tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
messages.append(tool_message_as_obj)
yield tool_message_as_obj
return
# Execute tool call with the appropriate session
session = self.sessions.get(function_name)
if session is not None:
try:
result = await session.call_tool(function_name, function_args)
tool_message["content"] = format_result(result)
except Exception as err:
tool_message["content"] = f"Error: MCP tool call failed with error message: {err}"
else:
tool_message["content"] = f"Error: No session found for tool: {function_name}"
# Yield tool message
tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
messages.append(tool_message_as_obj)
yield tool_message_as_obj
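
A hedged usage sketch of `MCPClient` on its own, without the `Agent` loop; the model id and server command are examples only (`MCPClient` is importable from `huggingface_hub`, as the agent module above shows):

import asyncio

from huggingface_hub import MCPClient

async def main() -> None:
    async with MCPClient(model="Qwen/Qwen2.5-72B-Instruct") as client:
        await client.add_mcp_server("stdio", command="npx", args=["@playwright/mcp@latest"])
        messages = [{"role": "user", "content": "Which tools can you call?"}]
        async for item in client.process_single_turn_with_tools(messages):
            # Streaming chunks expose `.choices`; tool result messages expose `.content`.
            if not hasattr(item, "choices"):
                print(item.content)

asyncio.run(main())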

@@ -0,0 +1,42 @@
from typing import Dict, List, Literal, TypedDict, Union
from typing_extensions import NotRequired
class InputConfig(TypedDict, total=False):
id: str
description: str
type: str
password: bool
class StdioServerConfig(TypedDict):
type: Literal["stdio"]
command: str
args: List[str]
env: Dict[str, str]
cwd: str
class HTTPServerConfig(TypedDict):
type: Literal["http"]
url: str
headers: Dict[str, str]
class SSEServerConfig(TypedDict):
type: Literal["sse"]
url: str
headers: Dict[str, str]
ServerConfig = Union[StdioServerConfig, HTTPServerConfig, SSEServerConfig]
# AgentConfig root object
class AgentConfig(TypedDict):
model: str
provider: str
apiKey: NotRequired[str]
inputs: List[InputConfig]
servers: List[ServerConfig]
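
A hedged illustration of dictionaries conforming to these TypedDicts (the command, path, URL and token are placeholders):

stdio_server: StdioServerConfig = {
    "type": "stdio",
    "command": "npx",
    "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
    "env": {},
    "cwd": "/tmp",
}
http_server: HTTPServerConfig = {
    "type": "http",
    "url": "https://example.com/mcp",
    "headers": {"Authorization": "Bearer <token>"},
}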

@@ -0,0 +1,124 @@
"""
Utility functions for MCPClient and Tiny Agents.
Formatting utilities taken from the JS SDK: https://github.com/huggingface/huggingface.js/blob/main/packages/mcp-client/src/ResultFormatter.ts.
"""
import json
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Tuple
from huggingface_hub import snapshot_download
from huggingface_hub.errors import EntryNotFoundError
from .constants import DEFAULT_AGENT, DEFAULT_REPO_ID, FILENAME_CONFIG, FILENAME_PROMPT
from .types import AgentConfig
if TYPE_CHECKING:
from mcp import types as mcp_types
def format_result(result: "mcp_types.CallToolResult") -> str:
"""
Formats the content of an `mcp.types.CallToolResult` into a human-readable string.
Args:
result (`CallToolResult`):
Object returned by mcp.ClientSession.call_tool.
Returns:
str
A formatted string representing the content of the result.
"""
content = result.content
if len(content) == 0:
return "[No content]"
formatted_parts: List[str] = []
for item in content:
if item.type == "text":
formatted_parts.append(item.text)
elif item.type == "image":
formatted_parts.append(
f"[Binary Content: Image {item.mimeType}, {_get_base64_size(item.data)} bytes]\n"
f"The task is complete and the content accessible to the User"
)
elif item.type == "audio":
formatted_parts.append(
f"[Binary Content: Audio {item.mimeType}, {_get_base64_size(item.data)} bytes]\n"
f"The task is complete and the content accessible to the User"
)
elif item.type == "resource":
resource = item.resource
if hasattr(resource, "text"):
formatted_parts.append(resource.text)
elif hasattr(resource, "blob"):
formatted_parts.append(
f"[Binary Content ({resource.uri}): {resource.mimeType}, {_get_base64_size(resource.blob)} bytes]\n"
f"The task is complete and the content accessible to the User"
)
return "\n".join(formatted_parts)
def _get_base64_size(base64_str: str) -> int:
"""Estimate the byte size of a base64-encoded string."""
# Remove any prefix like "data:image/png;base64,"
if "," in base64_str:
base64_str = base64_str.split(",")[1]
padding = 0
if base64_str.endswith("=="):
padding = 2
elif base64_str.endswith("="):
padding = 1
return (len(base64_str) * 3) // 4 - padding
def _load_agent_config(agent_path: Optional[str]) -> Tuple[AgentConfig, Optional[str]]:
"""Load server config and prompt."""
def _read_dir(directory: Path) -> Tuple[AgentConfig, Optional[str]]:
cfg_file = directory / FILENAME_CONFIG
if not cfg_file.exists():
raise FileNotFoundError(f" Config file not found in {directory}! Please make sure it exists locally")
config: AgentConfig = json.loads(cfg_file.read_text(encoding="utf-8"))
prompt_file = directory / FILENAME_PROMPT
prompt: Optional[str] = prompt_file.read_text(encoding="utf-8") if prompt_file.exists() else None
return config, prompt
if agent_path is None:
return DEFAULT_AGENT, None # type: ignore[return-value]
path = Path(agent_path).expanduser()
if path.is_file():
return json.loads(path.read_text(encoding="utf-8")), None
if path.is_dir():
return _read_dir(path)
# fetch from the Hub
try:
repo_dir = Path(
snapshot_download(
repo_id=DEFAULT_REPO_ID,
allow_patterns=f"{agent_path}/*",
repo_type="dataset",
)
)
return _read_dir(repo_dir / agent_path)
except Exception as err:
raise EntryNotFoundError(
f" Agent {agent_path} not found in tiny-agents/tiny-agents! Please make sure it exists in https://huggingface.co/datasets/tiny-agents/tiny-agents."
) from err
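
A small, hedged check of `_get_base64_size` above: "aGVsbG8=" is the base64 encoding of "hello" (8 characters, one '=' of padding), so the estimate is 8 * 3 // 4 - 1 = 5 bytes.

assert _get_base64_size("aGVsbG8=") == 5  # "hello" -> 5 bytes
assert _get_base64_size("data:image/png;base64,aGVsbG8=") == 5  # data-URL prefix is stripped first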