配合昨天白嫖的会员,正好有API可以用了,官方的API不兼容newapi等,所以有了这个脚本
模型是我乱写的,实际为后台设置的模型
以下三个参数请从Codegpt后台获取
CODEGPT_AGENT_ID = "YOUR_CODEGPT_AGENT_ID"
CODEGPT_ORG_ID = "YOUR_CODEGPT_ORG_ID"
CODEGPT_API_KEY = "YOUR_CODEGPT_API_KEY"
以下为对外的KEY
# Valid API key for our service
VALID_API_KEY = "YOUR_VALID_API_KEY"
requirements.txt内容为:
fastapi==0.104.1
uvicorn==0.24.0
requests==2.31.0
pydantic==2.5.0
python-multipart==0.0.6
python脚本:
总结
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OpenAI Compatible API Server for CodeGPT
Wraps CodeGPT API to provide OpenAI-compatible endpoints
"""
import json
import time
import uuid
from typing import Optional, Dict, Any, List
from datetime import datetime
import requests
import uvicorn
from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel, Field
# Configuration
CODEGPT_API_URL = "https://api.codegpt.co/api/v1/chat/completions"
CODEGPT_AGENT_ID = "YOUR_CODEGPT_AGENT_ID"
CODEGPT_ORG_ID = "YOUR_CODEGPT_ORG_ID"
CODEGPT_API_KEY = "YOUR_CODEGPT_API_KEY"
# Valid API key for our service
VALID_API_KEY = "YOUR_VALID_API_KEY"
app = FastAPI(
title="OpenAI Compatible API",
description="OpenAI Compatible API Server powered by CodeGPT",
version="1.0.0"
)
# Pydantic models for OpenAI compatibility
class Message(BaseModel):
    """One chat message: a role (e.g. "system"/"user"/"assistant") and its text."""

    role: str
    content: str
class ChatCompletionRequest(BaseModel):
    """Request body for /v1/chat/completions, mirroring the OpenAI schema.

    Most tuning knobs (temperature, top_p, penalties, ...) are accepted for
    client compatibility; this server only forwards ``messages`` and
    ``stream`` to CodeGPT.
    """

    model: str = "claude-4-sonnet-thinking-max"
    messages: List[Message]
    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    n: Optional[int] = 1
    stream: Optional[bool] = False
    stop: Optional[List[str]] = None
    presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None
class Usage(BaseModel):
    """Token accounting in OpenAI's response format (values here are estimates)."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
class Choice(BaseModel):
    """A single completion choice in a non-streaming response."""

    index: int
    message: Message
    finish_reason: Optional[str] = None
class ChatCompletionResponse(BaseModel):
    """Top-level non-streaming response, OpenAI ``chat.completion`` shaped."""

    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Choice]
    usage: Usage
class StreamChoice(BaseModel):
    """A single choice inside a streaming chunk; ``delta`` carries incremental fields."""

    index: int
    delta: Dict[str, Any]
    finish_reason: Optional[str] = None
class ChatCompletionStreamResponse(BaseModel):
    """One SSE chunk in OpenAI ``chat.completion.chunk`` format."""

    id: str
    object: str = "chat.completion.chunk"
    created: int
    model: str
    choices: List[StreamChoice]
class Model(BaseModel):
    """A model entry as returned by the OpenAI /v1/models endpoint."""

    id: str
    object: str = "model"
    created: int
    owned_by: str
class ModelsResponse(BaseModel):
    """Envelope for the model list (OpenAI ``list`` object)."""

    object: str = "list"
    data: List[Model]
# Authentication
def verify_api_key(authorization: str = Header(None)):
    """FastAPI dependency: validate ``Authorization: Bearer <key>`` against VALID_API_KEY.

    Raises HTTPException 401 when the header is missing, malformed, or carries
    the wrong key; returns the key itself on success.
    """
    if not authorization:
        raise HTTPException(status_code=401, detail="Missing authorization header")
    if not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid authorization format")
    presented_key = authorization.removeprefix("Bearer ")
    if presented_key != VALID_API_KEY:
        raise HTTPException(status_code=401, detail="Invalid API key")
    return presented_key
# Helper functions
def parse_codegpt_stream_line(line: str) -> Optional[Dict[str, Any]]:
    """Parse one SSE line from a CodeGPT stream.

    Returns the decoded JSON payload, ``{"done": True}`` for the ``[DONE]``
    sentinel, or None for blank/non-data/unparseable lines.
    """
    stripped = line.strip()
    if not stripped.startswith("data: "):
        return None
    payload = stripped.removeprefix("data: ")
    if payload == "[DONE]":
        return {"done": True}
    try:
        return json.loads(payload)
    except json.JSONDecodeError:
        return None
def convert_codegpt_to_openai_stream(codegpt_data: Dict[str, Any], request_id: str, model: str) -> Optional[str]:
    """Translate one parsed CodeGPT chunk into an OpenAI-format SSE line.

    Returns the ``data: {...}\\n\\n`` string to emit, or None when the chunk
    carries no text content.
    """
    if codegpt_data.get("done"):
        # Terminal chunk: empty delta with finish_reason "stop".
        terminal = ChatCompletionStreamResponse(
            id=request_id,
            created=int(time.time()),
            model=model,
            choices=[StreamChoice(index=0, delta={}, finish_reason="stop")],
        )
        return f"data: {terminal.model_dump_json()}\n\n"

    choices = codegpt_data.get("choices", [])
    if not choices:
        return None

    first = choices[0]
    delta = first.get("delta", {})
    # CodeGPT nests text fragments under delta["contents"]; concatenate
    # every fragment whose type is "text".
    text = "".join(
        part.get("content", "")
        for part in delta.get("contents", [])
        if part.get("type") == "text"
    )
    if not text:
        return None

    chunk = ChatCompletionStreamResponse(
        id=request_id,
        created=int(time.time()),
        model=model,
        choices=[StreamChoice(
            index=0,
            delta={"content": text},
            finish_reason=first.get("finish_reason"),
        )],
    )
    return f"data: {chunk.model_dump_json()}\n\n"
def stream_codegpt_response(codegpt_response, request_id: str, model: str):
    """Generator: convert a streaming CodeGPT HTTP response into OpenAI SSE chunks.

    Emits an initial role chunk, then one chunk per text fragment, then the
    ``[DONE]`` sentinel. On any failure it logs, emits an "error" chunk, and
    still terminates the stream with ``[DONE]``.
    """

    def _sse(delta: Dict[str, Any], finish_reason: Optional[str]) -> str:
        # Build one OpenAI-format chunk line for this request.
        event = ChatCompletionStreamResponse(
            id=request_id,
            created=int(time.time()),
            model=model,
            choices=[StreamChoice(index=0, delta=delta, finish_reason=finish_reason)],
        )
        return f"data: {event.model_dump_json()}\n\n"

    try:
        # Announce the assistant role before any content arrives.
        yield _sse({"role": "assistant"}, None)
        for raw_line in codegpt_response.iter_lines(decode_unicode=True):
            if not raw_line:
                continue
            parsed = parse_codegpt_stream_line(raw_line)
            if not parsed:
                continue
            converted = convert_codegpt_to_openai_stream(parsed, request_id, model)
            if converted:
                yield converted
        yield "data: [DONE]\n\n"
    except Exception as e:
        print(f"Error in stream: {e}")
        # Surface the failure to the client, then close the stream cleanly.
        yield _sse({}, "error")
        yield "data: [DONE]\n\n"
# API Endpoints
@app.get("/v1/models")
async def list_models(_: str = Depends(verify_api_key)):
    """List available models (OpenAI compatible).

    Returns a single hard-coded entry; the actual model is whatever the
    CodeGPT agent is configured to use server-side.
    """
    catalog = [
        Model(
            id="claude-4-sonnet-thinking-max",
            created=int(time.time()),
            owned_by="codegpt",
        ),
    ]
    return ModelsResponse(data=catalog)
def _extract_codegpt_content(response_text: str) -> str:
    """Concatenate the text fragments from a non-streaming CodeGPT reply.

    CodeGPT answers with SSE-style ``data: {...}`` lines even for
    non-streaming requests; the text lives in
    ``choices[0].delta.contents[*].content`` where ``type == "text"``.
    Returns "" when nothing could be extracted.
    """
    extracted = ""
    for raw_line in response_text.split('\n'):
        line = raw_line.strip()
        if not line.startswith('data: '):
            continue
        try:
            data = json.loads(line[6:])
            for content_item in data.get("choices", [{}])[0].get("delta", {}).get("contents", []):
                if content_item.get("type") == "text":
                    extracted += content_item.get("content", "")
        except (json.JSONDecodeError, AttributeError, TypeError, IndexError):
            # Skip malformed lines instead of failing the whole response
            # (was a bare ``except:`` that hid every error, incl. KeyboardInterrupt).
            continue
    return extracted


@app.post("/v1/chat/completions")
async def chat_completions(
    request: ChatCompletionRequest,
    _: str = Depends(verify_api_key)
):
    """Create chat completion (OpenAI compatible).

    Forwards the conversation to the configured CodeGPT agent and converts
    the reply — streaming or not — into the OpenAI response schema.

    Raises HTTPException with CodeGPT's status on upstream errors, and 500
    on network or unexpected failures.
    """
    codegpt_payload = {
        "agentId": CODEGPT_AGENT_ID,
        "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
        "format": "json",
        "stream": request.stream or False
    }
    codegpt_headers = {
        "accept": "application/json",
        "CodeGPT-Org-Id": CODEGPT_ORG_ID,
        "content-type": "application/json",
        "authorization": f"Bearer {CODEGPT_API_KEY}"
    }
    request_id = f"chatcmpl-{uuid.uuid4().hex}"
    try:
        codegpt_response = requests.post(
            CODEGPT_API_URL,
            json=codegpt_payload,
            headers=codegpt_headers,
            stream=request.stream or False,
            timeout=60
        )
        if not codegpt_response.ok:
            raise HTTPException(
                status_code=codegpt_response.status_code,
                detail=f"CodeGPT API error: {codegpt_response.text}"
            )
        if request.stream:
            # Serve the stream with the standard SSE media type
            # ("text/event-stream", not "text/plain") so OpenAI-compatible
            # clients recognize and parse the chunks.
            return StreamingResponse(
                stream_codegpt_response(codegpt_response, request_id, request.model),
                media_type="text/event-stream",
                headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}
            )
        # Non-streaming: parse the SSE-formatted body into a single message.
        content = _extract_codegpt_content(codegpt_response.text).strip()
        if not content:
            content = "I apologize, but there was an issue processing your request."
        completion_tokens = len(content.split())
        return ChatCompletionResponse(
            id=request_id,
            created=int(time.time()),
            model=request.model,
            choices=[Choice(
                index=0,
                message=Message(role="assistant", content=content),
                finish_reason="stop"
            )],
            usage=Usage(
                prompt_tokens=100,  # rough estimate; CodeGPT does not report usage
                completion_tokens=completion_tokens,
                total_tokens=100 + completion_tokens
            )
        )
    except HTTPException:
        # Bug fix: the upstream-error HTTPException raised above was being
        # swallowed by ``except Exception`` and re-wrapped as a generic 500;
        # re-raise it so the client sees CodeGPT's real status code.
        raise
    except requests.RequestException as e:
        raise HTTPException(status_code=500, detail=f"Request failed: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
@app.get("/")
async def root():
    """Describe the service and point at its available endpoints."""
    endpoints = {
        "models": "/v1/models",
        "chat_completions": "/v1/chat/completions",
    }
    return {
        "message": "OpenAI Compatible API Server powered by CodeGPT",
        "version": "1.0.0",
        "endpoints": endpoints,
    }
@app.get("/health")
async def health_check():
    """Liveness probe: report healthy with the current server timestamp."""
    now = datetime.now()
    return {"status": "healthy", "timestamp": now.isoformat()}
if __name__ == "__main__":
    # Startup banner: show where the server listens and which key clients use.
    print("Starting OpenAI Compatible API Server...")
    print("Server will run on http://localhost:8888")
    print(f"API Key: {VALID_API_KEY}")
    print("Models endpoint: http://localhost:8888/v1/models")
    print("Chat completions endpoint: http://localhost:8888/v1/chat/completions")
    uvicorn.run(app, host="0.0.0.0", port=8888, log_level="info")