
MCP Beginners Guide

pip install "mcp[cli]" yfinance
import yfinance as yf
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("stock_prices")

@mcp.tool()
async def get_stock_price(ticker: str) -> str:
    """Get the current stock price for a given ticker symbol.
    
    Args:
        ticker: Stock ticker symbol (e.g., AAPL, MSFT, GOOG)
        
    Returns:
        Current stock price as a string
    """
    try:
        stock = yf.Ticker(ticker)
        info = stock.info
        current_price = info.get('currentPrice') or info.get('regularMarketPrice')
        if not current_price:
            return f"Could not retrieve price for {ticker}"
        return f"${current_price:.2f}"
        
    except Exception as e:
        return f"Error: {str(e)}"

if __name__ == "__main__":
    mcp.run(transport='stdio')
Register the server with an MCP client such as Claude Desktop by adding it to the client's config file (for Claude Desktop, claude_desktop_config.json):

{
  "mcpServers": {
    "stock_prices": {
      "command": "/Users/praison/miniconda3/envs/mcp/bin/python",
      "args": [
        "/Users/praison/stockprice/app.py"
      ]
    }
  }
}
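
Before wiring the server into a client, you can sanity-check the tool directly, since FastMCP's decorator leaves the underlying function callable. A minimal sketch, assuming the server above is saved as app.py:

import asyncio
from app import get_stock_price  # the @mcp.tool() function defined above

# Importing app.py does not start the server: mcp.run() sits behind the
# __main__ guard. Call the coroutine directly to check the yfinance lookup.
print(asyncio.run(get_stock_price("AAPL")))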

With Other Providers

{
  "mcpServers": {
    "stock_prices": {
      "command": "/Users/praison/miniconda3/envs/mcp/bin/python",
      "args": [
        "/Users/praison/stockprice/app.py"
      ]
    },
    "neon": {
      "command": "npx",
      "args": [
        "-y",
        "@smithery/cli@latest",
        "run",
        "neon",
        "--config",
        "\"{\\\"apiKey\\\":\\\"YOUR_NEON_API_KEY\\\"}\""
      ]
    }
  }
}

OpenAI Responses API Basics

pip install -U openai geopy gradio rich
export OPENAI_API_KEY=xxxxxxxx

Basic

from openai import OpenAI
client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    input="Write a one-sentence bedtime story about a unicorn."
)

print(response.output_text)

UI

import gradio as gr
from openai import OpenAI

client = OpenAI()

def ask_ai(question):
    response = client.responses.create(
        model="gpt-4o",
        input=question
    )
    return response.output_text

demo = gr.Interface(
    fn=ask_ai,
    inputs="text",
    outputs="text",
    title="AI Assistant"
)

if __name__ == "__main__":
    demo.launch()

Tools

from openai import OpenAI
client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    tools=[{"type": "web_search_preview"}],
    input="Give me two AI news story from today in 2 sentence."
)

print(response.output_text)

RAG

from openai import OpenAI
from rich import print
client = OpenAI()

response = client.responses.create(
    model="gpt-4o-mini",
    input="Tell me about GraphRAG",
    tools=[{
        "type": "file_search",
        "vector_store_ids": ["vs_67d08c2b03asdf"]
    }]
)
print(response)
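
The vector_store_ids value above is a placeholder; the store has to exist and contain files before file_search can answer from them. A minimal sketch of creating one, assuming a recent openai SDK (where vector stores are available at client.vector_stores) and a local file named graphrag.pdf:

from openai import OpenAI

client = OpenAI()

# Create an empty vector store, then upload and index a document into it.
vector_store = client.vector_stores.create(name="knowledge_base")
with open("graphrag.pdf", "rb") as f:
    client.vector_stores.files.upload_and_poll(
        vector_store_id=vector_store.id,
        file=f,
    )

print(vector_store.id)  # pass this ID in the file_search tool config above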

Custom Tools

from openai import OpenAI
import requests
import json
from rich import print
from geopy.geocoders import Nominatim

client = OpenAI()

# 1. Create Custom Tool
def get_weather(location):
    geolocator = Nominatim(user_agent="weather_app")
    location_data = geolocator.geocode(location)
    # Fall back to (0, 0) if geocoding fails
    if location_data:
        latitude, longitude = location_data.latitude, location_data.longitude
    else:
        latitude, longitude = 0, 0
    response = requests.get(f"https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}&current=temperature_2m,wind_speed_10m&hourly=temperature_2m,relative_humidity_2m,wind_speed_10m")
    data = response.json()
    return data['current']['temperature_2m']

# 2. Create Custom Tool Definition
tools = [{
    "type": "function",
    "name": "get_weather",
    "description": "Get current temperature for a given location.",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "City and country e.g. Bogotá, Colombia"
            }
        },
        "required": [
            "location"
        ],
        "additionalProperties": False
    }
}]

# 3. Ask Question to the model
input_messages = [{"role": "user", "content": "What is the weather like in Paris today?"}]
response = client.responses.create(
    model="gpt-4o",
    input=input_messages,
    tools=tools
)
print(f"Response: {response.output}")

# 4. Parse the model's response & Run the Tool
tool_call = response.output[0]
args = json.loads(tool_call.arguments)
result = get_weather(args["location"])
print(f"Result: {result}")

# 5. Append the result to the input messages
input_messages.append(tool_call)  
input_messages.append({           
    "type": "function_call_output",
    "call_id": tool_call.call_id,
    "output": str(result)
})

# 6. Ask the model again with original question and the response from the tool
response_2 = client.responses.create(
    model="gpt-4o",
    input=input_messages,
    tools=tools,
)
print(f"Response 2: {response_2.output_text}")

Stream

from openai import OpenAI
client = OpenAI()

stream = client.responses.create(
    model="gpt-4o-mini",
    input=[
        {
            "role": "user",
            "content": "Write a 1000 words bedtime story about a unicorn.",
        },
    ],
    stream=True,
)

for event in stream:
    # Text chunks arrive as response.output_text.delta events;
    # a final response.output_text.done event carries the full text if needed.
    if event.type == 'response.output_text.delta':
        print(event.delta, end="")

LM Studio (Upcoming Update)

from openai import OpenAI
client = OpenAI(base_url="http://localhost:1234/v1", api_key="fake-key")

response = client.responses.create(
    model="gemma3",
    input="Write a one-sentence bedtime story about a unicorn."
)

print(response)

OpenAI Agents SDK Ollama

Installation

Download Ollama from https://ollama.com

pip install -U openai-agents chainlit duckduckgo-search
ollama pull llama3.2

Single Agent

from agents import Agent, Runner, OpenAIChatCompletionsModel, AsyncOpenAI

model = OpenAIChatCompletionsModel( 
    model="llama3.2",
    openai_client=AsyncOpenAI(base_url="http://localhost:11434/v1")
)

agent = Agent(name="Assistant",
              instructions="You are a helpful assistant",
              model=model)

result = Runner.run_sync(agent, "Create a meal plan for a week.")
print(result.final_output)
python app.py

Multi Agents

from duckduckgo_search import DDGS
from agents import Agent, Runner, AsyncOpenAI, OpenAIChatCompletionsModel, function_tool
from datetime import datetime

current_date = datetime.now().strftime("%Y-%m")

model = OpenAIChatCompletionsModel(
    model="llama3.2",
    openai_client=AsyncOpenAI(base_url="http://localhost:11434/v1")
)

# 1. Create Internet Search Tool

@function_tool
def get_news_articles(topic):
    print(f"Running DuckDuckGo news search for {topic}...")
    
    # DuckDuckGo search
    ddg_api = DDGS()
    results = ddg_api.text(f"{topic} {current_date}", max_results=5)
    if results:
        news_results = "\n\n".join([f"Title: {result['title']}\nURL: {result['href']}\nDescription: {result['body']}" for result in results])
        print(news_results)
        return news_results
    else:
        return f"Could not find news results for {topic}."
    
# 2. Create AI Agents

# News Agent to fetch news
news_agent = Agent(
    name="News Assistant",
    instructions="You provide the latest news articles for a given topic using DuckDuckGo search.",
    tools=[get_news_articles],
    model=model
)

# Editor Agent to edit news
editor_agent = Agent(
    name="Editor Assistant",
    instructions="Rewrite and give me as news article ready for publishing. Each News story in separate section.",
    model=model
)

# 3. Create workflow

def run_news_workflow(topic):
    print("Running news Agent workflow...")
    
    # Step 1: Fetch news
    news_response = Runner.run_sync(
        news_agent,
        f"Get me the news about {topic} on {current_date}"
    )
    
    # Access the content from RunResult object
    raw_news = news_response.final_output
    
    # Step 2: Pass news to editor for final review
    edited_news_response = Runner.run_sync(
        editor_agent,
        raw_news
    )
    
    # Access the content from RunResult object
    edited_news = edited_news_response.final_output
    
    print("Final news article:")
    print(edited_news)
    
    return edited_news

# Example of running the news workflow for a given topic
print(run_news_workflow("AI"))

UI

ui.py file (assumes the multi-agent script above is saved as news.py):

import chainlit as cl
from news import run_news_workflow

@cl.on_message
async def main(message: cl.Message):
    """
    Main function to handle user messages and run the news workflow.
    """
    # Get the topic from the user message
    topic = message.content
    
    # Send a thinking message
    await cl.Message(
        content=f"Searching for news about '{topic}'...",
        author="News Bot"
    ).send()
    
    try:
        # Run the news workflow
        news_content = run_news_workflow(topic)
        
        # Send the result back to the user
        await cl.Message(
            content=news_content,
            author="News Bot"
        ).send()
    except Exception as e:
        # Handle any errors
        await cl.Message(
            content=f"Error fetching news: {str(e)}",
            author="News Bot"
        ).send()

@cl.on_chat_start
async def start():
    """
    Function that runs when a new chat session starts.
    """
    # Send a welcome message
    await cl.Message(
        content="Welcome to the News Assistant! What topic would you like to get news about?",
        author="News Bot"
    ).send() 
chainlit run ui.py

Gemma 3 Create Agents

ollama pull gemma3
pip install ollama chainlit "praisonaiagents[llm]"

Ollama code

import ollama

response = ollama.chat(
    model='gemma3',
    messages=[
        {
        'role': 'user',
        'content': 'Give me a meal plan for today',
        },
    ],
)

print(response['message']['content'])
python app.py

UI

ui.py file

import ollama
import chainlit as cl

@cl.on_message
async def main(message: cl.Message):
    response = ollama.chat(
        model='gemma3',
        messages=[
            {
            'role': 'user',
            'content': message.content,
            },
        ],
    )
    
    await cl.Message(content=response['message']['content']).send()

@cl.on_chat_start
async def start():
    await cl.Message(content="Hello! I'm Gemma3. How can I help you today?").send()
chainlit run ui.py

Single Agent

from praisonaiagents import Agent

agent = Agent(instructions="You are a helpful assistant", llm="ollama/gemma3")

agent.start("Why is the sky blue?")
python app.py

Multi Agents

from praisonaiagents import Agent, PraisonAIAgents
from praisonaiagents.tools import internet_search

agent1 = Agent(instructions="Write a LinkedIn post", tools=[internet_search], llm="ollama/gemma3")
agent2 = Agent(instructions="Write a tweet based on the linkedIn post", llm="ollama/gemma3")

agents = PraisonAIAgents(agents=[agent1, agent2])

agents.start("Write about the Donald Trump 2025 election")
python app.py

OpenAI Responses API Basics

pip install openai
export OPENAI_API_KEY=xxxxxxxxxxxxx

Responses API

from openai import OpenAI
from rich import print
client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    input="Write a meal plan for a week."
)

print(response.output_text)

Chatbot

pip install gradio
from openai import OpenAI
from rich import print
import gradio as gr

client = OpenAI()

def generate_response(prompt):
    response = client.responses.create(
        model="gpt-4o",
        input=prompt
    )
    return response.output_text

# Create Gradio interface
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(placeholder="Enter your prompt here...", label="Prompt", value="Write a meal plan for a week."),
    outputs=gr.Textbox(label="Response"),
    title="OpenAI GPT-4o",
    description="Enter a prompt to get a response from GPT-4o"
)

if __name__ == "__main__":
    demo.launch()

Web Search

from openai import OpenAI
client = OpenAI()

response = client.responses.create(
    model="gpt-4o",
    tools=[{"type": "web_search_preview"}],
    input="Tell me about Mervin Praison"
)

print(response.output_text)

File Search

from openai import OpenAI
from rich import print
client = OpenAI()

response = client.responses.create(
    model="gpt-4o-mini",
    input="Tell me about GraphRAG",
    tools=[{
        "type": "file_search",
        "vector_store_ids": ["vs_67d08c2b03ac8191baff7fbbfcc7ffd2"]
    }]
)
print(response)

Computer Use

from openai import OpenAI
client = OpenAI()

response = client.responses.create(
    model="computer-use-preview",
    tools=[{
        "type": "computer_use_preview",
        "display_width": 1024,
        "display_height": 768,
        "environment": "browser" # other possible values: "mac", "windows", "ubuntu"
    }],
    input=[
        {
            "role": "user",
            "content": "Check the latest OpenAI news on bing.com."
        }
    ],
    truncation="auto"
)

print(response.output)
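
The call above only yields the model's first proposed action. Computer use is a loop: each computer_call item in the output must be executed in your own browser or VM and a screenshot sent back. A loop skeleton, assuming a hypothetical execute_action() helper (e.g. built on Playwright) that performs the action and returns a base64-encoded screenshot:

def execute_action(action):
    # Hypothetical stub: perform the click/type/scroll with your own
    # browser automation and return a base64-encoded screenshot.
    raise NotImplementedError

while True:
    computer_calls = [item for item in response.output if item.type == "computer_call"]
    if not computer_calls:
        break  # no more actions requested; the model is done
    call = computer_calls[0]
    screenshot_b64 = execute_action(call.action)
    response = client.responses.create(
        model="computer-use-preview",
        previous_response_id=response.id,
        tools=[{
            "type": "computer_use_preview",
            "display_width": 1024,
            "display_height": 768,
            "environment": "browser"
        }],
        input=[{
            "call_id": call.call_id,
            "type": "computer_call_output",
            "output": {
                "type": "computer_screenshot",
                "image_url": f"data:image/png;base64,{screenshot_b64}"
            }
        }],
        truncation="auto"
    )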

Tool

from agents import Agent, function_tool, Runner

def get_weather(city: str) -> str:
    return f"The weather in {city} is sunny and 34 degree celsius"

agent = Agent(
    name="Haiku agent",
    instructions="Always respond in haiku form",
    model="gpt-4o-mini",
    tools=[function_tool(get_weather)],
)

result = Runner.run_sync(agent, "What's the weather in Tokyo?")
print(result.final_output)

Agents SDK

pip install openai-agents
from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="You are a helpful assistant")

result = Runner.run_sync(agent, "Create a meal plan for a week.")
print(result.final_output)

MCP Browser Logs Config

{
  "mcpServers": {
    "browser-tools": {
      "command": "npx",
      "args": [
        "-y",
        "@agentdeskai/[email protected]"
      ]
    }
  }
}
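
Note: this config only starts the MCP side; per AgentDesk's BrowserTools documentation, a companion browser-tools-server process and the matching Chrome extension must also be running for the browser logs to be captured.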

PraisonAI Vision Model Training config.yaml

config.yaml

dataset:
- name: mervinpraison/Radiology_mini-10rows
dataset_num_proc: 2
dataset_text_field: text
gradient_accumulation_steps: 4
hf_model_name: mervinpraison/Llama-3.2-11B-Vision-test
huggingface_save: 'true'
learning_rate: 0.0002
load_in_4bit: true
loftq_config: null
logging_steps: 1
lora_alpha: 16
lora_bias: none
lora_dropout: 0
lora_r: 16
lora_target_modules:
- q_proj
- k_proj
- v_proj
- o_proj
- gate_proj
- up_proj
- down_proj
lr_scheduler_type: linear
max_seq_length: 2048
max_steps: 10
model_name: unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit
model_parameters: 14b
num_train_epochs: 1
ollama_model: mervinpraison/llama-3.2-11b-vision-test
ollama_save: 'true'
optim: adamw_8bit
output_dir: outputs
packing: false
per_device_train_batch_size: 1
quantization_method:
- q4_k_m
random_state: 3407
seed: 3407
train: 'true'
use_gradient_checkpointing: unsloth
use_rslora: false
warmup_steps: 5
weight_decay: 0.01

# Vision-specific parameters
finetune_vision_layers: false
finetune_language_layers: true
finetune_attention_modules: true
finetune_mlp_modules: true
vision_instruction: "You are an expert radiographer. Describe accurately what you see in this image."
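
With this config.yaml in the working directory, training can then be launched via the PraisonAI CLI (command shape assumed from PraisonAI's train workflow):

praisonai train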

PraisonAI Train Test Config.yaml

dataset:
- name: mervinpraison/alpaca-cleaned-10rows
dataset_num_proc: 2
dataset_text_field: text
gradient_accumulation_steps: 2
hf_model_name: mervinpraison/llama3.2-1B-instruct-test
huggingface_save: 'true'
learning_rate: 0.0002
load_in_4bit: true
loftq_config: null
logging_steps: 2
lora_alpha: 16
lora_bias: none
lora_dropout: 0
lora_r: 16
lora_target_modules:
- q_proj
- k_proj
- v_proj
- o_proj
- gate_proj
- up_proj
- down_proj
lr_scheduler_type: linear
max_seq_length: 2048
max_steps: 10
model_name: unsloth/Llama-3.2-1B-Instruct-bnb-4bit
model_parameters: 1b
num_train_epochs: 1
ollama_model: mervinpraison/llama3.2-1B-instruct-test
ollama_save: 'true'
optim: adamw_8bit
output_dir: outputs
packing: false
per_device_train_batch_size: 2
quantization_method:
- q4_k_m
random_state: 3407
seed: 3407
train: 'true'
use_gradient_checkpointing: unsloth
use_rslora: false
warmup_steps: 5
weight_decay: 0.01

Find Sequence Length of Dataset

pip install datasets matplotlib
from datasets import load_dataset
import matplotlib.pyplot as plt

# Load dataset
dataset = load_dataset("yahma/alpaca-cleaned")

# Select the text column to analyze ('input' here; modify if your dataset uses a different column)
column_name = 'input'

# Compute sequence lengths
sequence_lengths = [len(text.split()) for text in dataset['train'][column_name]]

# Plot histogram
plt.figure(figsize=(10, 5))
plt.hist(sequence_lengths, bins=50, edgecolor='black')
plt.xlabel("Sequence Length (in words)")
plt.ylabel("Frequency")
plt.title("Distribution of Sequence Lengths")
plt.show()

# Print basic statistics
import numpy as np
print(f"Mean length: {np.mean(sequence_lengths):.2f}")
print(f"Median length: {np.median(sequence_lengths):.2f}")
print(f"Max length: {np.max(sequence_lengths)}")
print(f"Min length: {np.min(sequence_lengths)}")
A variant that auto-detects whether to use the 'instruction' or 'input' column:

from datasets import load_dataset
import matplotlib.pyplot as plt
import numpy as np

# Load dataset
dataset = load_dataset("yahma/alpaca-cleaned")

# Get all column names (assuming 'train' split exists)
all_columns = dataset["train"].column_names
print("All columns:", all_columns)

# Filter columns that include 'instruction' or 'input'
filtered_columns = [col for col in all_columns if "instruction" in col.lower() or "input" in col.lower()]
print("Filtered columns:", filtered_columns)

# Choose column for analysis: prefer "instruction" if available, else "input", otherwise the first found column
if "instruction" in filtered_columns:
    column_name = "instruction"
elif "input" in filtered_columns:
    column_name = "input"
elif filtered_columns:
    column_name = filtered_columns[0]
else:
    raise ValueError("No column containing 'instruction' or 'input' found.")

# Compute sequence lengths (word count) for the selected column in the 'train' split
sequence_lengths = [len(text.split()) for text in dataset["train"][column_name]]

# Plot histogram
plt.figure(figsize=(10, 5))
plt.hist(sequence_lengths, bins=50, edgecolor='black')
plt.xlabel("Sequence Length (in words)")
plt.ylabel("Frequency")
plt.title(f"Distribution of Sequence Lengths for '{column_name}' column")
plt.show()

# Print basic statistics
print(f"Mean length: {np.mean(sequence_lengths):.2f}")
print(f"Median length: {np.median(sequence_lengths):.2f}")
print(f"Max length: {np.max(sequence_lengths)}")
print(f"Min length: {np.min(sequence_lengths)}")