pip install -U openai geopy gradio rich
export OPENAI_API_KEY=xxxxxxxx
Basic
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    input="Write a one-sentence bedtime story about a unicorn."
)

print(response.output_text)
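If you want system-style guidance without building a full message list, the Responses API also accepts an instructions field; a minimal sketch (the instruction text here is just an example):

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    instructions="You are a gentle storyteller for young children.",  # system-style guidance
    input="Write a one-sentence bedtime story about a unicorn."
)

print(response.output_text)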
UI
import gradio as gr
from openai import OpenAI

client = OpenAI()

def ask_ai(question):
    # Forward the question to the Responses API and return the plain-text answer
    response = client.responses.create(
        model="gpt-4o",
        input=question
    )
    return response.output_text

demo = gr.Interface(
    fn=ask_ai,
    inputs="text",
    outputs="text",
    title="AI Assistant"
)

if __name__ == "__main__":
    demo.launch()
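For a multi-turn chat UI, one option is to chain turns with previous_response_id instead of resending the history yourself; a sketch assuming a single-user demo (the global variable is for illustration only):

import gradio as gr
from openai import OpenAI

client = OpenAI()
last_response_id = None  # single-user demo state, for illustration only

def chat(message, history):
    global last_response_id
    response = client.responses.create(
        model="gpt-4o",
        input=message,
        previous_response_id=last_response_id,  # lets the API carry the conversation context
    )
    last_response_id = response.id
    return response.output_text

if __name__ == "__main__":
    gr.ChatInterface(fn=chat, title="AI Assistant").launch()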
Tools
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    tools=[{"type": "web_search_preview"}],
    input="Give me two AI news stories from today in 2 sentences."
)

print(response.output_text)
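The returned output list contains the search call plus the assistant message, and the message's text parts carry URL citations as annotations; a sketch of inspecting them (attribute names follow the Responses API docs and may differ slightly across SDK versions):

# Inspect the individual output items and any cited sources
for item in response.output:
    print(item.type)  # e.g. "web_search_call", then "message"
    if item.type == "message":
        for part in item.content:
            for annotation in getattr(part, "annotations", []) or []:
                print(annotation.url)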
RAG
from openai import OpenAI
from rich import print

client = OpenAI()

response = client.responses.create(
    model="gpt-4o-mini",
    input="Tell me about GraphRAG",
    tools=[{
        "type": "file_search",
        "vector_store_ids": ["vs_67d08c2b03asdf"]  # replace with your own vector store ID
    }]
)

print(response)
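The vector store ID above is a placeholder. Creating a store and attaching a document looks roughly like this (helper names follow recent openai SDK versions, where older releases keep vector stores under client.beta; the file name is hypothetical):

from openai import OpenAI

client = OpenAI()

# Create a vector store and upload a document into it
vector_store = client.vector_stores.create(name="graphrag-notes")
client.vector_stores.files.upload_and_poll(
    vector_store_id=vector_store.id,
    file=open("graphrag_paper.pdf", "rb"),
)

print(vector_store.id)  # pass this ID in the file_search tool's vector_store_ids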
Custom Tools
from openai import OpenAI
import requests
import json
from rich import print
from geopy.geocoders import Nominatim
client = OpenAI()
# 1. Create Custom Tool
def get_weather(location):
    # Resolve the location name to coordinates, then query Open-Meteo for current conditions
    geolocator = Nominatim(user_agent="weather_app")
    location_data = geolocator.geocode(location)
    if location_data:
        latitude, longitude = location_data.latitude, location_data.longitude
    else:
        latitude, longitude = 0, 0
    response = requests.get(f"https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}&current=temperature_2m,wind_speed_10m&hourly=temperature_2m,relative_humidity_2m,wind_speed_10m")
    data = response.json()
    return data['current']['temperature_2m']
# 2. Create Custom Tool Definition
tools = [{
    "type": "function",
    "name": "get_weather",
    "description": "Get current temperature for a given location.",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "City and country e.g. Bogotá, Colombia"
            }
        },
        "required": ["location"],
        "additionalProperties": False
    }
}]
# 3. Ask Question to the model
input_messages = [{"role": "user", "content": "What is the weather like in Paris today?"}]
response = client.responses.create(
    model="gpt-4o",
    input=input_messages,
    tools=tools
)
print(f"Response: {response.output}")
# 4. Parse the model's response & Run the Tool
tool_call = response.output[0]  # assumes the first output item is the function call
args = json.loads(tool_call.arguments)
result = get_weather(args["location"])
print(f"Result: {result}")
# 5. Append the result to the input messages
input_messages.append(tool_call)
input_messages.append({
    "type": "function_call_output",
    "call_id": tool_call.call_id,
    "output": str(result)
})
# 6. Ask the model again with original question and the response from the tool
response_2 = client.responses.create(
    model="gpt-4o",
    input=input_messages,
    tools=tools,
)
print(f"Response 2: {response_2.output_text}")
Stream
from openai import OpenAI
client = OpenAI()
stream = client.responses.create(
    model="gpt-4o-mini",
    input=[
        {
            "role": "user",
            "content": "Write a 1,000-word bedtime story about a unicorn.",
        },
    ],
    stream=True,
)
# Print text chunks as they arrive; other event types (e.g. response.output_text.done) are ignored
for event in stream:
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
print()
LM Studio (Responses API support in an upcoming update)
from openai import OpenAI

# Point the SDK at LM Studio's local server
client = OpenAI(base_url="http://localhost:1234/v1", api_key="fake-key")

response = client.responses.create(
    model="gemma3",
    input="Write a one-sentence bedtime story about a unicorn."
)

print(response)
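Until that update ships, LM Studio's local server already speaks the Chat Completions API, so the same SDK works today; a sketch (the model name is whatever you have loaded in LM Studio):

from openai import OpenAI

client = OpenAI(base_url="http://localhost:1234/v1", api_key="fake-key")

completion = client.chat.completions.create(
    model="gemma3",  # use the identifier of the locally loaded model
    messages=[{"role": "user", "content": "Write a one-sentence bedtime story about a unicorn."}],
)

print(completion.choices[0].message.content)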