Use this file to discover all available pages before exploring further.
Edgee is an AI Gateway that reduces LLM costs by up to 50% through intelligent token compression.
If you want to save tokens for your coding agents, or to optimize the context your AI applications send to LLMs, Edgee is the solution for you.
AI Applications: Get Started in Seconds with our SDKs
TypeScript
Python
Go
Rust
OpenAI SDK
Anthropic SDK
LangChain
cURL
// Minimal Edgee quickstart: send one prompt and report any token savings.
import Edgee from 'edgee';

const edgee = new Edgee("your-api-key");

const response = await edgee.send({
  model: 'gpt-5.2',
  input: 'What is the capital of France?',
});

console.log(response.text);

// `compression` is only present when Edgee actually compressed the request.
if (response.compression) {
  console.log(`Tokens saved: ${response.compression.saved_tokens}`);
}
# Minimal Edgee quickstart: send one prompt and report any token savings.
from edgee import Edgee

edgee = Edgee("your-api-key")

response = edgee.send(
    model="gpt-5.2",
    input="What is the capital of France?"
)

print(response.text)

# `compression` is only set when Edgee actually compressed the request.
if response.compression:
    print(f"Tokens saved: {response.compression.saved_tokens}")
package mainimport ( "fmt" "log" "github.com/edgee-ai/go-sdk/edgee")func main() { client, _ := edgee.NewClient("your-api-key") response, err := client.Send("gpt-5.2", "What is the capital of France?") if err != nil { log.Fatal(err) } fmt.Println(response.Text()) if response.Compression != nil { fmt.Printf("Tokens saved: %d\n", response.Compression.SavedTokens) }}
// Minimal Edgee quickstart: send one prompt and report any token savings.
// NOTE(review): `.await` requires an async context — run this inside an
// async fn (e.g. under your async runtime's main) — confirm against the SDK docs.
use edgee::Edgee;

let client = Edgee::with_api_key("your-api-key");

let response = client
    .send("gpt-5.2", "What is the capital of France?")
    .await
    .unwrap();

println!("{}", response.text().unwrap_or(""));

// `compression` is only populated when Edgee compressed the request.
if let Some(compression) = &response.compression {
    println!("Tokens saved: {}", compression.saved_tokens);
}
// Use the official OpenAI SDK unchanged — only the baseURL points at Edgee.
import OpenAI from "openai";

const openai = new OpenAI({
  baseURL: "https://api.edgee.ai/v1",
  apiKey: process.env.EDGEE_API_KEY,
});

const completion = await openai.chat.completions.create({
  model: "gpt-5.2",
  messages: [
    { role: "user", content: "What is the capital of France?" }
  ],
});

console.log(completion.choices[0].message.content);
// Use the official Anthropic SDK unchanged — only the baseURL points at Edgee.
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic({
  baseURL: 'https://api.edgee.ai',
  apiKey: process.env.EDGEE_API_KEY,
});

const message = await client.messages.create({
  model: 'claude-sonnet-4.5',
  max_tokens: 1024,
  messages: [
    { role: 'user', content: 'What is the capital of France?' }
  ]
});

console.log(message.content);
# Point LangChain's OpenAI-compatible chat model at the Edgee gateway.
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
import os

llm = ChatOpenAI(
    base_url="https://api.edgee.ai/v1",
    api_key=os.getenv("EDGEE_API_KEY"),
    model="gpt-5.2",
)

response = llm.invoke([HumanMessage(content="What is the capital of France?")])
print(response.content)
# OpenAI-compatible chat completions endpoint, called directly with curl.
curl https://api.edgee.ai/v1/chat/completions \
  -H "Authorization: Bearer $EDGEE_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"model":"gpt-5.2","messages":[{"role":"user","content":"What is the capital of France?"}]}'
That’s it. You now have access to every major LLM provider, automatic failovers, cost tracking, and full observability, all through Edgee’s Gateway.