Use this page as an index to discover all available documentation pages before exploring further.
The Edgee Python SDK supports OpenAI-compatible function calling (tools), allowing models to request execution of functions you define. This enables models to interact with external APIs, databases, and your application logic.
# Force the model to invoke get_weather by naming it in "tool_choice".
response = edgee.send(
    model="gpt-5.2",
    input={
        "messages": [
            {"role": "user", "content": "What is the weather?"},
        ],
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the current weather",
                    "parameters": {...},
                },
            },
        ],
        "tool_choice": {
            "type": "function",
            "function": {"name": "get_weather"},
        },
    },
)
# Model will always call get_weather
Example - Disable Tool Calls:
# Setting "tool_choice" to "none" keeps the tools listed but forbids calls.
response = edgee.send(
    model="gpt-5.2",
    input={
        "messages": [
            {"role": "user", "content": "What is the weather?"},
        ],
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the current weather",
                    "parameters": {...},
                },
            },
        ],
        "tool_choice": "none",
    },
)
# Model will not call tools, even though they're available
import json

# Tool-call arguments arrive as a JSON-encoded string; decode before use.
tool_call = response.tool_calls[0]
args = json.loads(tool_call["function"]["arguments"])
# args is now a Python dictionary
print(args["location"])  # e.g., "Paris"
Models can request multiple tool calls in a single response. Use parallel execution when possible:
import asyncio
import json


async def run_tool_calls_in_parallel(response, messages):
    """Execute every tool call requested in *response* concurrently.

    Each result is appended to *messages* as a "tool" role entry so the
    conversation can be sent back to the model.

    NOTE(review): the original snippet awaited asyncio.gather at module
    level (a SyntaxError) and tested ``tool_calls and len(tool_calls) > 0``;
    an async wrapper and a plain truthiness check fix both.
    """
    # An empty list (or None) is falsy — no extra len() check is needed.
    if not response.tool_calls:
        return

    async def execute_tool_call(tool_call):
        # Arguments are delivered as a JSON string; decode before dispatch.
        args = json.loads(tool_call["function"]["arguments"])
        result = await execute_function(tool_call["function"]["name"], args)
        return {"tool_call_id": tool_call["id"], "result": result}

    # Execute all tool calls in parallel
    results = await asyncio.gather(
        *[execute_tool_call(tool_call) for tool_call in response.tool_calls]
    )

    # Add all tool results to messages
    for result in results:
        messages.append({
            "role": "tool",
            "tool_call_id": result["tool_call_id"],
            "content": json.dumps(result["result"]),
        })
Example - Handling Multiple Tool Calls:
# Step 2: Execute all tool calls
# NOTE(review): `await` is only legal inside an async function; the original
# snippet used it at top level, so the example is wrapped accordingly.
async def execute_multiple_tool_calls(response1):
    """Run each requested get_weather call and build the follow-up messages."""
    messages = [
        {"role": "user", "content": "What is the weather in Paris and Tokyo?"},
        response1.message,  # Include assistant's message
    ]
    if response1.tool_calls:
        for tool_call in response1.tool_calls:
            # Arguments arrive as a JSON string; "unit" is optional.
            args = json.loads(tool_call["function"]["arguments"])
            result = await get_weather(args["location"], args.get("unit"))
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call["id"],
                "content": json.dumps(result),
            })
    return messages
Include tools in follow-up requests so the model can call them again if needed:
# NOTE(review): the original used JavaScript spread syntax
# ("[...messages_with_tool_results]"), which is invalid Python;
# "[*messages_with_tool_results]" is the Python equivalent.
response2 = edgee.send(
    model="gpt-5.2",
    input={
        "messages": [*messages_with_tool_results],
        "tools": [
            # Keep the same tools available
            {"type": "function", "function": {...}},
        ],
    },
)
Example - Checking for Tool Calls:
# A non-empty tool_calls list means the model is requesting function runs.
if response.tool_calls:
    # Model wants to call a function
    for requested in response.tool_calls:
        fn = requested["function"]
        print(f"Function: {fn['name']}")
        print(f"Arguments: {fn['arguments']}")
Example - Executing Functions and Sending Results:
import json


# NOTE(review): `await get_weather(...)` is only legal inside an async
# function; the original snippet used it at top level, so the example is
# wrapped in an async function.
async def execute_and_send_result(response):
    """Execute the model's get_weather call, return the result, and print
    the model's final answer."""
    # Execute the function
    tool_call = response.tool_calls[0]
    args = json.loads(tool_call["function"]["arguments"])
    weather_result = await get_weather(args["location"], args.get("unit"))

    # Send the result back
    response2 = edgee.send(
        model="gpt-5.2",
        input={
            "messages": [
                {"role": "user", "content": "What is the weather in Paris?"},
                response.message,  # Include assistant's message with tool_calls
                {
                    "role": "tool",
                    "tool_call_id": tool_call["id"],
                    "content": json.dumps(weather_result),
                },
            ],
            "tools": [
                # Include the same tools for potential follow-up calls
                {
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "description": "Get the current weather for a location",
                        "parameters": {...},
                    },
                },
            ],
        },
    )
    print(response2.text)
    # "The weather in Paris is 15°C and sunny."