codegpt官方API转openai格式脚本

配合昨天白嫖的会员,正好有API可以用了,官方的API不兼容newapi等,所以有了这个脚本
模型是我乱写的,实际为后台设置的模型

以下三个参数请从Codegpt后台获取

CODEGPT_AGENT_ID = "YOUR_CODEGPT_AGENT_ID"
CODEGPT_ORG_ID = "YOUR_CODEGPT_ORG_ID"
CODEGPT_API_KEY = "YOUR_CODEGPT_API_KEY"

以下为对外的KEY

# Valid API key for our service
VALID_API_KEY = "YOUR_VALID_API_KEY"

requirements.txt内容为:

fastapi==0.104.1
uvicorn==0.24.0
requests==2.31.0
pydantic==2.5.0
python-multipart==0.0.6

python脚本:

总结
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OpenAI Compatible API Server for CodeGPT
Wraps CodeGPT API to provide OpenAI-compatible endpoints
"""

import json
import time
import uuid
from typing import Optional, Dict, Any, List
from datetime import datetime

import requests
import uvicorn
from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel, Field


# Configuration
CODEGPT_API_URL = "https://api.codegpt.co/api/v1/chat/completions"  # upstream CodeGPT chat endpoint
CODEGPT_AGENT_ID = "YOUR_CODEGPT_AGENT_ID"  # agent id from the CodeGPT dashboard
CODEGPT_ORG_ID = "YOUR_CODEGPT_ORG_ID"  # organisation id from the CodeGPT dashboard
CODEGPT_API_KEY = "YOUR_CODEGPT_API_KEY"  # upstream API key (secret)

# Valid API key for our service
# NOTE(review): secrets are hard-coded placeholders here; consider loading
# them from environment variables so real values are never committed.
VALID_API_KEY = "YOUR_VALID_API_KEY"

app = FastAPI(
    title="OpenAI Compatible API",
    description="OpenAI Compatible API Server powered by CodeGPT",
    version="1.0.0"
)


# Pydantic models for OpenAI compatibility
class Message(BaseModel):
    """A single chat message (OpenAI wire format)."""
    role: str  # e.g. "system" / "user" / "assistant" -- not validated here
    content: str  # plain-text message body


class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible).

    Only ``messages``, ``stream`` and ``model`` are used downstream; the
    remaining fields are accepted for client compatibility but are not
    forwarded to CodeGPT.
    """
    model: str = "claude-4-sonnet-thinking-max"  # echoed back; real model is whatever the CodeGPT agent is configured with
    messages: List[Message]
    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    n: Optional[int] = 1
    stream: Optional[bool] = False  # True -> SSE streaming response
    stop: Optional[List[str]] = None
    presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None


class Usage(BaseModel):
    """Token accounting block of a chat completion response (values are estimates here)."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class Choice(BaseModel):
    """One completion alternative in a non-streaming response."""
    index: int
    message: Message
    finish_reason: Optional[str] = None  # e.g. "stop"


class ChatCompletionResponse(BaseModel):
    """Top-level non-streaming response (OpenAI ``chat.completion`` object)."""
    id: str  # "chatcmpl-<hex>" request id
    object: str = "chat.completion"
    created: int  # unix timestamp, seconds
    model: str
    choices: List[Choice]
    usage: Usage


class StreamChoice(BaseModel):
    """One delta entry inside a streaming chunk."""
    index: int
    delta: Dict[str, Any]  # {"role": ...} first, then {"content": ...}, {} on the final chunk
    finish_reason: Optional[str] = None


class ChatCompletionStreamResponse(BaseModel):
    """One SSE chunk of a streaming response (``chat.completion.chunk``)."""
    id: str
    object: str = "chat.completion.chunk"
    created: int  # unix timestamp, seconds
    model: str
    choices: List[StreamChoice]


class Model(BaseModel):
    """A single entry in the /v1/models listing."""
    id: str
    object: str = "model"
    created: int  # unix timestamp, seconds
    owned_by: str


class ModelsResponse(BaseModel):
    """Response envelope for GET /v1/models."""
    object: str = "list"
    data: List[Model]


# Authentication
def verify_api_key(authorization: str = Header(None)) -> str:
    """FastAPI dependency validating the ``Authorization: Bearer <key>`` header.

    Returns the presented key on success; raises HTTP 401 otherwise.
    """
    import hmac  # local import: constant-time comparison helper

    if not authorization:
        raise HTTPException(status_code=401, detail="Missing authorization header")

    if not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid authorization format")

    api_key = authorization.removeprefix("Bearer ")
    # compare_digest avoids leaking key prefixes through timing differences.
    if not hmac.compare_digest(api_key, VALID_API_KEY):
        raise HTTPException(status_code=401, detail="Invalid API key")

    return api_key


# Helper functions
def parse_codegpt_stream_line(line: str) -> Optional[Dict[str, Any]]:
    """Decode one SSE line from CodeGPT.

    Returns ``{"done": True}`` for the terminator, the parsed JSON payload
    for a data line, and ``None`` for anything else (including bad JSON).
    """
    prefix = "data: "
    stripped = line.strip()
    if not stripped.startswith(prefix):
        return None

    payload = stripped[len(prefix):]
    if payload == "[DONE]":
        return {"done": True}

    try:
        return json.loads(payload)
    except json.JSONDecodeError:
        return None


def convert_codegpt_to_openai_stream(codegpt_data: Dict[str, Any], request_id: str, model: str) -> Optional[str]:
    """Convert one parsed CodeGPT stream payload into an OpenAI SSE chunk.

    Returns a ready-to-send ``"data: ...\\n\\n"`` string, or None when the
    payload carries nothing a client needs (no text and no finish_reason).
    """
    if codegpt_data.get("done"):
        # Upstream [DONE]: emit the terminating chunk with finish_reason="stop".
        final_chunk = ChatCompletionStreamResponse(
            id=request_id,
            created=int(time.time()),
            model=model,
            choices=[StreamChoice(index=0, delta={}, finish_reason="stop")]
        )
        return f"data: {final_chunk.model_dump_json()}\n\n"

    choices = codegpt_data.get("choices", [])
    if not choices:
        return None

    choice = choices[0]
    delta = choice.get("delta", {})
    contents = delta.get("contents", [])
    # finish_reason lives at the choice level in CodeGPT's format.
    finish_reason = choice.get("finish_reason")

    # CodeGPT wraps text in a `contents` array of typed items; collect text parts.
    text_content = ""
    if isinstance(contents, list):
        for item in contents:
            if isinstance(item, dict) and item.get("type") == "text":
                text_content += item.get("content", "")
    # Fallback: some payloads put plain text directly in delta.content.
    if not text_content and isinstance(delta.get("content"), str):
        text_content = delta["content"]

    # Forward chunks that have text OR a finish_reason; a reason-only chunk is
    # valid in the OpenAI stream format and must not be dropped (the original
    # code discarded it, so clients could miss the upstream finish_reason).
    if not (text_content or finish_reason):
        return None

    openai_delta: Dict[str, Any] = {}
    if text_content:
        openai_delta["content"] = text_content

    chunk = ChatCompletionStreamResponse(
        id=request_id,
        created=int(time.time()),
        model=model,
        choices=[StreamChoice(index=0, delta=openai_delta, finish_reason=finish_reason)]
    )
    return f"data: {chunk.model_dump_json()}\n\n"


def stream_codegpt_response(codegpt_response, request_id: str, model: str):
    """Generator yielding OpenAI-format SSE strings built from a CodeGPT stream."""
    def _sse(delta, reason):
        # Serialize one chunk in OpenAI "chat.completion.chunk" format.
        message = ChatCompletionStreamResponse(
            id=request_id,
            created=int(time.time()),
            model=model,
            choices=[StreamChoice(index=0, delta=delta, finish_reason=reason)],
        )
        return f"data: {message.model_dump_json()}\n\n"

    try:
        # Announce the assistant role before any content arrives.
        yield _sse({"role": "assistant"}, None)

        for raw_line in codegpt_response.iter_lines(decode_unicode=True):
            if not raw_line:
                continue
            parsed = parse_codegpt_stream_line(raw_line)
            if not parsed:
                continue
            converted = convert_codegpt_to_openai_stream(parsed, request_id, model)
            if converted:
                yield converted

        yield "data: [DONE]\n\n"

    except Exception as exc:
        print(f"Error in stream: {exc}")
        # Signal the failure to the client, then terminate the stream cleanly.
        yield _sse({}, "error")
        yield "data: [DONE]\n\n"


# API Endpoints
@app.get("/v1/models")
async def list_models(_: str = Depends(verify_api_key)):
    """List available models (OpenAI compatible)"""
    models = [
        Model(
            id="claude-4-sonnet-thinking-max",
            created=int(time.time()),
            owned_by="codegpt"
        )
    ]
    
    return ModelsResponse(data=models)


@app.post("/v1/chat/completions")
async def chat_completions(
    request: ChatCompletionRequest,
    _: str = Depends(verify_api_key)
):
    """Create chat completion (OpenAI compatible).

    Forwards the conversation to the configured CodeGPT agent and translates
    the reply back into the OpenAI schema, streaming or not depending on
    ``request.stream``.
    """
    codegpt_payload = {
        "agentId": CODEGPT_AGENT_ID,
        "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
        "format": "json",
        "stream": bool(request.stream)
    }

    codegpt_headers = {
        "accept": "application/json",
        "CodeGPT-Org-Id": CODEGPT_ORG_ID,
        "content-type": "application/json",
        "authorization": f"Bearer {CODEGPT_API_KEY}"
    }

    request_id = f"chatcmpl-{uuid.uuid4().hex}"

    try:
        codegpt_response = requests.post(
            CODEGPT_API_URL,
            json=codegpt_payload,
            headers=codegpt_headers,
            stream=bool(request.stream),
            timeout=60
        )

        if not codegpt_response.ok:
            raise HTTPException(
                status_code=codegpt_response.status_code,
                detail=f"CodeGPT API error: {codegpt_response.text}"
            )

        if request.stream:
            # Pass the upstream stream through, converting chunk by chunk.
            return StreamingResponse(
                stream_codegpt_response(codegpt_response, request_id, request.model),
                media_type="text/plain",
                headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}
            )

        # Non-streaming: CodeGPT returns one JSON document (the original code
        # parsed the body as SSE here and always fell back to the apology text).
        content = _extract_completion_text(codegpt_response)

        # Rough whitespace-token estimates; CodeGPT does not report usage.
        prompt_tokens = sum(len(msg.content.split()) for msg in request.messages)
        completion_tokens = len(content.split())

        return ChatCompletionResponse(
            id=request_id,
            created=int(time.time()),
            model=request.model,
            choices=[Choice(
                index=0,
                message=Message(role="assistant", content=content),
                finish_reason="stop"
            )],
            usage=Usage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )
        )

    except HTTPException:
        # Preserve upstream status codes (e.g. 401/429) instead of letting the
        # broad handler below mask them as 500.
        raise
    except requests.RequestException as e:
        raise HTTPException(status_code=500, detail=f"Request failed: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")


def _extract_completion_text(codegpt_response) -> str:
    """Pull the assistant text out of a non-streaming CodeGPT response.

    Tries the plain-JSON shape first (what CodeGPT returns when stream=false),
    then falls back to scanning SSE "data:" lines in case the upstream
    streamed anyway.
    """
    try:
        body = codegpt_response.json()
        choices = body.get("choices", [])
        if choices and isinstance(choices[0].get("message"), dict):
            text = choices[0]["message"].get("content", "")
            if text:
                return text
    except ValueError:
        pass  # body was not JSON; try the SSE fallback below

    extracted = ""
    for line in codegpt_response.text.split('\n'):
        data = parse_codegpt_stream_line(line)
        if not data or data.get("done"):
            continue
        choices = data.get("choices", [])
        if not choices:
            continue
        for item in choices[0].get("delta", {}).get("contents", []):
            if isinstance(item, dict) and item.get("type") == "text":
                extracted += item.get("content", "")

    if extracted.strip():
        return extracted.strip()
    return "I apologize, but there was an issue processing your request."


@app.get("/")
async def root():
    """Root endpoint"""
    return {
        "message": "OpenAI Compatible API Server powered by CodeGPT",
        "version": "1.0.0",
        "endpoints": {
            "models": "/v1/models",
            "chat_completions": "/v1/chat/completions"
        }
    }


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return {"status": "healthy", "timestamp": datetime.now().isoformat()}


if __name__ == "__main__":
    print("Starting OpenAI Compatible API Server...")
    print(f"Server will run on http://localhost:8888")
    print(f"API Key: {VALID_API_KEY}")
    print(f"Models endpoint: http://localhost:8888/v1/models")
    print(f"Chat completions endpoint: http://localhost:8888/v1/chat/completions")
    
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8888,
        log_level="info"
    )
10 个赞

感谢大佬!

感谢大佬

好像只有3.7呀,能用到4.0吗

我觉得他这个是后台选的,4.0 vscode插件能调用,不知道有没有大佬能把api也搞成4.0

感谢大佬

roo code里用不了嘛

感谢大帅!

还没试过 简单的调试了一下能用

感谢大师

大佬v5

感谢大师

可以用了,我改了一下

我试了试应该没限制。酒馆应该不行 :bili_001:

{
  "id": "chatcmpl-a24be630956c46de87f9defdc343f326",
  "object": "chat.completion",
  "created": 1748523870,
  "model": "Claude 3.7 Sonnet",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "I apologize, but there was an issue processing your request."
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 100,
    "completion_tokens": 10,
    "total_tokens": 110
  }
}
返回这个是啥意思啊,模型错了吗?

不清楚,他官方文档都没有显示的指明model。试试3.7 thinking? 还有他这个有内容审查的,我写代码他都能抱歉拒绝

模型名称感觉不太明确,你是用的啥啊

claude 3.7 thinking,我是恶趣味返回的claude 4.0,直接用这玩意就行
如果你有疑惑可以看看官方文档:Completion

{
  "id": "chatcmpl-003bbd445b9b41398a9c270fb451f6b4",
  "object": "chat.completion",
  "created": 1748525469,
  "model": "claude 3.7 thinking",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "I apologize, but there was an issue processing your request."
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 100,
    "completion_tokens": 10,
    "total_tokens": 110
  }
}

发送的是:
{
  "model": "claude 3.7 thinking",
  "messages": [
    {
      "role": "user",
      "content": "你好!"
    }
  ]
}

佬,是哪里不对吗?

感谢大佬, codegpt 2api 用起来了, 非流式响应代码有点问题 用ai修复了下。

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OpenAI Compatible API Server for CodeGPT
Wraps CodeGPT API to provide OpenAI-compatible endpoints
"""

import json
import time
import uuid
from typing import Optional, Dict, Any, List
from datetime import datetime

import requests
import uvicorn
from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel, Field


# Configuration
CODEGPT_API_URL = "https://api.codegpt.co/api/v1/chat/completions"  # upstream CodeGPT chat endpoint
CODEGPT_AGENT_ID = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"  # agent id from the CodeGPT dashboard
CODEGPT_ORG_ID = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"  # organisation id from the CodeGPT dashboard
CODEGPT_API_KEY = "sk-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"  # upstream API key (secret)

# Valid API key for our service
# NOTE(review): consider loading these secrets from environment variables
# rather than hard-coding them in the script.
VALID_API_KEY = "xxxxxxxxxxxxxxxxxxxxxxx"

app = FastAPI(
    title="OpenAI Compatible API",
    description="OpenAI Compatible API Server powered by CodeGPT",
    version="1.0.0"
)


# Pydantic models for OpenAI compatibility
class Message(BaseModel):
    """A single chat message (OpenAI wire format)."""
    role: str  # e.g. "system" / "user" / "assistant" -- not validated here
    content: str  # plain-text message body


class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible).

    Only ``messages``, ``stream`` and ``model`` are used downstream; the
    remaining fields are accepted for client compatibility but are not
    forwarded to CodeGPT.
    """
    model: str = "claude-4-sonnet-thinking-max"  # echoed back; real model is whatever the CodeGPT agent is configured with
    messages: List[Message]
    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    n: Optional[int] = 1
    stream: Optional[bool] = False  # True -> SSE streaming response
    stop: Optional[List[str]] = None
    presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None


class Usage(BaseModel):
    """Token accounting block of a chat completion response (values are estimates here)."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class Choice(BaseModel):
    """One completion alternative in a non-streaming response."""
    index: int
    message: Message
    finish_reason: Optional[str] = None  # e.g. "stop"


class ChatCompletionResponse(BaseModel):
    """Top-level non-streaming response (OpenAI ``chat.completion`` object)."""
    id: str  # "chatcmpl-<hex>" request id
    object: str = "chat.completion"
    created: int  # unix timestamp, seconds
    model: str
    choices: List[Choice]
    usage: Usage


class StreamChoice(BaseModel):
    """One delta entry inside a streaming chunk."""
    index: int
    delta: Dict[str, Any]  # {"role": ...} first, then {"content": ...}, {} on the final chunk
    finish_reason: Optional[str] = None


class ChatCompletionStreamResponse(BaseModel):
    """One SSE chunk of a streaming response (``chat.completion.chunk``)."""
    id: str
    object: str = "chat.completion.chunk"
    created: int  # unix timestamp, seconds
    model: str
    choices: List[StreamChoice]


# Authentication
def verify_api_key(authorization: str = Header(None)) -> str:
    """FastAPI dependency validating the ``Authorization: Bearer <key>`` header.

    Returns the presented key on success; raises HTTP 401 otherwise.
    """
    import hmac  # local import: constant-time comparison helper

    if not authorization:
        raise HTTPException(status_code=401, detail="Missing authorization header")

    if not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid authorization format")

    api_key = authorization.removeprefix("Bearer ")
    # compare_digest avoids leaking key prefixes through timing differences.
    if not hmac.compare_digest(api_key, VALID_API_KEY):
        raise HTTPException(status_code=401, detail="Invalid API key")

    return api_key


# Helper functions
def parse_codegpt_stream_line(line: str) -> Optional[Dict[str, Any]]:
    """Decode one SSE line from CodeGPT.

    Returns ``{"done": True}`` for the "[DONE]" terminator, the parsed JSON
    payload for a data line, and ``None`` otherwise (including bad JSON).
    """
    marker = "data: "
    trimmed = line.strip()
    if not trimmed.startswith(marker):
        return None

    body = trimmed[len(marker):]
    if body == "[DONE]":
        return {"done": True}

    try:
        return json.loads(body)
    except json.JSONDecodeError:
        return None


def convert_codegpt_to_openai_stream(codegpt_data: Dict[str, Any], request_id: str, model: str) -> Optional[str]:
    """Convert CodeGPT stream data to OpenAI format, handling the 'contents' array.

    Returns a ready-to-send ``"data: ...\\n\\n"`` SSE string, or None when the
    payload has neither text nor a finish_reason worth forwarding.
    """
    if codegpt_data.get("done"):
        # Upstream [DONE]: emit a final chunk explicitly marking finish_reason.
        final_chunk = ChatCompletionStreamResponse(
            id=request_id,
            created=int(time.time()),
            model=model,
            choices=[StreamChoice(index=0, delta={}, finish_reason="stop")]
        )
        return f"data: {final_chunk.model_dump_json()}\n\n"

    choices = codegpt_data.get("choices", [])
    if not choices:
        return None  # no choices in this chunk

    choice = choices[0]
    delta = choice.get("delta", {})
    contents = delta.get("contents", [])
    # finish_reason lives at the choice level, not inside delta, in CodeGPT's format.
    finish_reason = choice.get("finish_reason")

    # Extract only 'text'-type content from the 'contents' array.
    text_content = ""
    if isinstance(contents, list):
        for content_item in contents:
            if isinstance(content_item, dict) and content_item.get("type") == "text":
                text_content += content_item.get("content", "")

    # Fallback: some payloads carry plain text in delta.content. This must be
    # an independent check -- as an `elif` of the isinstance() test above it
    # was unreachable whenever `contents` was an (empty) list, the common
    # "missing" case because .get("contents", []) defaults to a list.
    if not text_content and isinstance(delta.get("content"), str):
        text_content = delta.get("content", "")

    # Send a chunk if there's text OR a finish_reason; a reason-only chunk is
    # valid in the OpenAI stream format.
    if not (text_content or finish_reason):
        return None

    openai_delta: Dict[str, Any] = {}
    if text_content:
        openai_delta["content"] = text_content
    # Role is sent only in the first chunk by stream_codegpt_response.

    chunk = ChatCompletionStreamResponse(
        id=request_id,
        created=int(time.time()),
        model=model,
        choices=[StreamChoice(index=0, delta=openai_delta, finish_reason=finish_reason)]
    )
    return f"data: {chunk.model_dump_json()}\n\n"


def stream_codegpt_response(codegpt_response, request_id: str, model: str):
    """Generator yielding OpenAI-format SSE strings built from a CodeGPT stream."""
    def _emit(delta, reason):
        # Serialize one chunk in OpenAI "chat.completion.chunk" format.
        packet = ChatCompletionStreamResponse(
            id=request_id,
            created=int(time.time()),
            model=model,
            choices=[StreamChoice(index=0, delta=delta, finish_reason=reason)],
        )
        return f"data: {packet.model_dump_json()}\n\n"

    try:
        # Announce the assistant role before any content arrives.
        yield _emit({"role": "assistant"}, None)

        for raw in codegpt_response.iter_lines(decode_unicode=True):
            if not raw:
                continue
            parsed = parse_codegpt_stream_line(raw)
            if not parsed:
                continue
            chunk = convert_codegpt_to_openai_stream(parsed, request_id, model)
            if chunk:
                yield chunk

        yield "data: [DONE]\n\n"

    except Exception as exc:
        print(f"Error in stream: {exc}")
        # Signal the failure to the client, then terminate the stream cleanly.
        yield _emit({}, "error")
        yield "data: [DONE]\n\n"


# API Endpoints
@app.post("/v1/chat/completions")
async def chat_completions(
    request: ChatCompletionRequest,
    _: str = Depends(verify_api_key)
):
    """Create chat completion (OpenAI compatible).

    Proxies the conversation to the configured CodeGPT agent; streams SSE
    chunks when ``request.stream`` is true, otherwise returns one JSON body.
    """
    # Estimate prompt tokens (whitespace split; CodeGPT reports no usage).
    prompt_tokens = sum(len(msg.content.split()) for msg in request.messages)

    codegpt_payload = {
        "agentId": CODEGPT_AGENT_ID,
        "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
        "format": "json",
        "stream": request.stream or False
    }

    codegpt_headers = {
        "accept": "application/json",
        "CodeGPT-Org-Id": CODEGPT_ORG_ID,
        "content-type": "application/json",
        "authorization": f"Bearer {CODEGPT_API_KEY}"
    }

    request_id = f"chatcmpl-{uuid.uuid4().hex}"

    try:
        codegpt_response = requests.post(
            CODEGPT_API_URL,
            json=codegpt_payload,
            headers=codegpt_headers,
            stream=request.stream or False,
            timeout=60
        )

        if not codegpt_response.ok:
            raise HTTPException(
                status_code=codegpt_response.status_code,
                detail=f"CodeGPT API error: {codegpt_response.text}"
            )

        if request.stream:
            # Pass the upstream stream through, converting chunk by chunk.
            return StreamingResponse(
                stream_codegpt_response(codegpt_response, request_id, request.model),
                media_type="text/plain",
                headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}
            )

        # Non-streaming: CodeGPT returns one JSON document.
        try:
            codegpt_json = codegpt_response.json()
            choices = codegpt_json.get("choices", [])
            if choices and choices[0].get("message"):
                content = choices[0]["message"].get("content", "")
            else:
                content = "Error: Could not extract content from CodeGPT response."
            # Estimate completion tokens (whitespace split).
            completion_tokens = len(content.split())
        except json.JSONDecodeError:
            content = "Error: Failed to decode CodeGPT JSON response."
            completion_tokens = 0
        except Exception as e:
            content = f"Error processing CodeGPT response: {str(e)}"
            completion_tokens = 0

        return ChatCompletionResponse(
            id=request_id,
            created=int(time.time()),
            model=request.model,
            choices=[Choice(
                index=0,
                message=Message(role="assistant", content=content),
                finish_reason="stop"
            )],
            usage=Usage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )
        )

    except HTTPException:
        # Preserve upstream status codes (e.g. 401/429) instead of letting the
        # broad handler below mask them as 500.
        raise
    except requests.RequestException as e:
        raise HTTPException(status_code=500, detail=f"Request failed: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")


@app.get("/")
async def root():
    """Root endpoint"""
    return {
        "message": "OpenAI Compatible API Server powered by CodeGPT",
        "version": "1.0.0",
        "endpoints": {
            "chat_completions": "/v1/chat/completions"
        }
    }


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return {"status": "healthy", "timestamp": datetime.now().isoformat()}


if __name__ == "__main__":
    print("Starting OpenAI Compatible API Server...")
    print(f"Server will run on http://localhost:8888")
    print(f"API Key: {VALID_API_KEY}")
    print(f"Chat completions endpoint: http://localhost:8888/v1/chat/completions")

    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8888,
        log_level="info"
    )
1 个赞