SDK Integration
Use Antigravity Gateway with popular AI SDKs.
OpenAI Python SDK
python
from openai import OpenAI
client = OpenAI(
base_url="http://localhost:8080/v1",
api_key="any-value"
)
# Non-streaming
response = client.chat.completions.create(
model="gemini-3-flash",
messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
# Streaming
stream = client.chat.completions.create(
model="gemini-3-flash",
messages=[{"role": "user", "content": "Hello!"}],
stream=True
)
for chunk in stream:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="")

OpenAI JavaScript SDK
javascript
import OpenAI from 'openai';
const client = new OpenAI({
baseURL: 'http://localhost:8080/v1',
apiKey: 'any-value'
});
// Non-streaming
const response = await client.chat.completions.create({
model: 'gemini-3-flash',
messages: [{ role: 'user', content: 'Hello!' }]
});
console.log(response.choices[0].message.content);
// Streaming
const stream = await client.chat.completions.create({
model: 'gemini-3-flash',
messages: [{ role: 'user', content: 'Hello!' }],
stream: true
});
for await (const chunk of stream) {
process.stdout.write(chunk.choices[0]?.delta?.content || '');
}

Anthropic Python SDK
python
import anthropic
client = anthropic.Anthropic(
base_url="http://localhost:8080",
api_key="any-value"
)
# Non-streaming
message = client.messages.create(
model="claude-sonnet-4-5-thinking",
max_tokens=1024,
messages=[{"role": "user", "content": "Hello!"}]
)
print(message.content[0].text)
# Streaming
with client.messages.stream(
model="claude-sonnet-4-5-thinking",
max_tokens=1024,
messages=[{"role": "user", "content": "Hello!"}]
) as stream:
for text in stream.text_stream:
print(text, end="")

cURL
OpenAI Format
bash
# Non-streaming
curl http://localhost:8080/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer any-value" \
-d '{
"model": "gemini-3-flash",
"messages": [{"role": "user", "content": "Hello!"}]
}'
# Streaming
curl http://localhost:8080/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer any-value" \
-d '{
"model": "gemini-3-flash",
"messages": [{"role": "user", "content": "Hello!"}],
"stream": true
}'

Anthropic Format
bash
# Non-streaming
curl http://localhost:8080/v1/messages \
-H "Content-Type: application/json" \
-H "x-api-key: any-value" \
-H "anthropic-version: 2023-06-01" \
-d '{
"model": "claude-sonnet-4-5-thinking",
"max_tokens": 1024,
"messages": [{"role": "user", "content": "Hello!"}]
}'
# Streaming
curl http://localhost:8080/v1/messages \
-H "Content-Type: application/json" \
-H "x-api-key: any-value" \
-H "anthropic-version: 2023-06-01" \
-d '{
"model": "claude-sonnet-4-5-thinking",
"max_tokens": 1024,
"messages": [{"role": "user", "content": "Hello!"}],
"stream": true
}'

LangChain
Python
python
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
base_url="http://localhost:8080/v1",
api_key="any-value",
model="gemini-3-flash"
)
response = llm.invoke("Hello!")
print(response.content)

JavaScript
javascript
import { ChatOpenAI } from "@langchain/openai";
const llm = new ChatOpenAI({
configuration: {
baseURL: "http://localhost:8080/v1",
},
apiKey: "any-value",
modelName: "gemini-3-flash"
});
const response = await llm.invoke("Hello!");
console.log(response.content);

Vercel AI SDK
typescript
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';
const gateway = createOpenAI({
baseURL: 'http://localhost:8080/v1',
apiKey: 'any-value',
});
const { text } = await generateText({
model: gateway('gemini-3-flash'),
prompt: 'Hello!',
});
console.log(text);