Skip to content

Commit 95d9dac

Browse files
committed
feat: Enhance LLM logging and configuration options
- Updated `pyproject.toml` to modify PyQt5 version constraints and add PyQt5-Qt5 for Windows.
- Introduced a logging mechanism for LLM requests and responses, including a new `request_logger.py` for managing logs.
- Added a user interface for viewing LLM logs, allowing users to inspect request and response details.
- Adjusted configuration settings to improve thread management and font options in the application.

These changes improve the application's logging capabilities and user experience when interacting with LLM functionalities.
1 parent b815b32 commit 95d9dac

File tree

9 files changed

+1535
-1036
lines changed

9 files changed

+1535
-1036
lines changed

app/common/config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ class Config(QConfig):
139139
)
140140
deeplx_endpoint = ConfigItem("Translate", "DeeplxEndpoint", "")
141141
batch_size = RangeConfigItem("Translate", "BatchSize", 10, RangeValidator(5, 50))
142-
thread_num = RangeConfigItem("Translate", "ThreadNum", 10, RangeValidator(1, 100))
142+
thread_num = RangeConfigItem("Translate", "ThreadNum", 10, RangeValidator(1, 50))
143143

144144
# ------------------- 转录配置 -------------------
145145
transcribe_model = OptionsConfigItem(
@@ -273,7 +273,7 @@ class Config(QConfig):
273273
)
274274

275275
# 圆角背景模式配置
276-
rounded_bg_font_name = ConfigItem("RoundedBgStyle", "FontName", "Noto Sans SC")
276+
rounded_bg_font_name = ConfigItem("RoundedBgStyle", "FontName", "LXGW WenKai")
277277
rounded_bg_font_size = RangeConfigItem(
278278
"RoundedBgStyle", "FontSize", 52, RangeValidator(16, 120)
279279
)

app/core/llm/client.py

Lines changed: 27 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -18,37 +18,16 @@
1818
from app.core.utils.cache import get_llm_cache, memoize
1919
from app.core.utils.logger import setup_logger
2020

21+
from .request_logger import create_logging_http_client, log_llm_response
22+
2123
_global_client: Optional[OpenAI] = None
2224
_client_lock = threading.Lock()
2325

2426
logger = setup_logger("llm_client")
2527

2628

2729
def normalize_base_url(base_url: str) -> str:
28-
"""Normalize API base URL by ensuring /v1 suffix when needed.
29-
30-
Handles various edge cases:
31-
- Removes leading/trailing whitespace
32-
- Only adds /v1 if domain has no path, or path is empty/root
33-
- Removes trailing slashes from /v1 (e.g., /v1/ -> /v1)
34-
- Preserves custom paths (e.g., /custom stays as /custom)
35-
36-
Args:
37-
base_url: Raw base URL string
38-
39-
Returns:
40-
Normalized base URL
41-
42-
Examples:
43-
>>> normalize_base_url("https://api.openai.com")
44-
'https://api.openai.com/v1'
45-
>>> normalize_base_url("https://api.openai.com/v1/")
46-
'https://api.openai.com/v1'
47-
>>> normalize_base_url("https://api.openai.com/custom")
48-
'https://api.openai.com/custom'
49-
>>> normalize_base_url(" https://api.openai.com ")
50-
'https://api.openai.com/v1'
51-
"""
30+
"""Normalize API base URL by ensuring /v1 suffix when needed."""
5231
url = base_url.strip()
5332
parsed = urlparse(url)
5433
path = parsed.path.rstrip("/")
@@ -71,19 +50,11 @@ def normalize_base_url(base_url: str) -> str:
7150

7251

7352
def get_llm_client() -> OpenAI:
74-
"""Get global LLM client instance (thread-safe singleton).
75-
76-
Returns:
77-
Global OpenAI client instance
78-
79-
Raises:
80-
ValueError: If OPENAI_BASE_URL or OPENAI_API_KEY env vars not set
81-
"""
53+
"""Get global LLM client instance (thread-safe singleton)."""
8254
global _global_client
8355

8456
if _global_client is None:
8557
with _client_lock:
86-
# Double-check locking pattern
8758
if _global_client is None:
8859
base_url = os.getenv("OPENAI_BASE_URL", "").strip()
8960
base_url = normalize_base_url(base_url)
@@ -94,7 +65,11 @@ def get_llm_client() -> OpenAI:
9465
"OPENAI_BASE_URL and OPENAI_API_KEY environment variables must be set"
9566
)
9667

97-
_global_client = OpenAI(base_url=base_url, api_key=api_key)
68+
_global_client = OpenAI(
69+
base_url=base_url,
70+
api_key=api_key,
71+
http_client=create_logging_http_client(),
72+
)
9873

9974
return _global_client
10075

@@ -105,35 +80,19 @@ def before_sleep_log(retry_state: RetryCallState) -> None:
10580
)
10681

10782

108-
@memoize(get_llm_cache(), expire=3600, typed=True)
10983
@retry(
11084
stop=stop_after_attempt(10),
11185
wait=wait_random_exponential(multiplier=1, min=5, max=60),
11286
retry=retry_if_exception_type(openai.RateLimitError),
11387
before_sleep=before_sleep_log,
11488
)
115-
def call_llm(
89+
def _call_llm_api(
11690
messages: List[dict],
11791
model: str,
11892
temperature: float = 1,
11993
**kwargs: Any,
12094
) -> Any:
121-
"""Call LLM API with automatic caching.
122-
123-
Uses global LLM client configured via environment variables.
124-
125-
Args:
126-
messages: Chat messages list
127-
model: Model name
128-
temperature: Sampling temperature
129-
**kwargs: Additional parameters for API call
130-
131-
Returns:
132-
API response object
133-
134-
Raises:
135-
ValueError: If response is invalid (empty choices or content)
136-
"""
95+
"""实际调用 LLM API(带重试)"""
13796
client = get_llm_client()
13897

13998
response = client.chat.completions.create(
@@ -143,7 +102,22 @@ def call_llm(
143102
**kwargs,
144103
)
145104

146-
# Validate response (exceptions are not cached by diskcache)
105+
# 记录响应内容
106+
log_llm_response(response)
107+
108+
return response
109+
110+
111+
@memoize(get_llm_cache(), expire=3600, typed=True)
112+
def call_llm(
113+
messages: List[dict],
114+
model: str,
115+
temperature: float = 1,
116+
**kwargs: Any,
117+
) -> Any:
118+
"""Call LLM API with automatic caching."""
119+
response = _call_llm_api(messages, model, temperature, **kwargs)
120+
147121
if not (
148122
response
149123
and hasattr(response, "choices")

app/core/llm/request_logger.py

Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
import json
import threading
import time
from datetime import datetime
from typing import Any, Dict

import httpx

from app.config import LOG_PATH


# Destination for request/response records, one JSON object per line (JSONL).
LLM_LOG_FILE = LOG_PATH / "llm_requests.jsonl"
# Rotate the log file once it grows past this many bytes (10 MB).
MAX_LOG_SIZE = 10 * 1024 * 1024


# Serializes rotation and appends across worker threads.
_log_lock = threading.Lock()
# Request info staged by id(request); merged with the parsed response later.
_pending_requests: Dict[int, Dict[str, Any]] = {}
20+
# ==================== 日志写入 ====================
21+
22+
23+
def _rotate_if_needed() -> None:
    """Rotate the log once it exceeds MAX_LOG_SIZE, keeping one .old backup.

    Must be called with ``_log_lock`` held (see ``_write_log``) so the
    rename cannot race a concurrent append.
    """
    if not LLM_LOG_FILE.exists() or LLM_LOG_FILE.stat().st_size < MAX_LOG_SIZE:
        return

    backup_path = LLM_LOG_FILE.with_suffix(".jsonl.old")
    if backup_path.exists():
        backup_path.unlink()
    LLM_LOG_FILE.rename(backup_path)
34+
35+
36+
def _write_log(entry: Dict[str, Any]) -> None:
    """Append one record to the JSONL log; never raise.

    Logging is strictly best-effort: any serialization or I/O failure is
    swallowed so a broken log file can never surface as an LLM error.
    """
    try:
        LOG_PATH.mkdir(parents=True, exist_ok=True)
        with _log_lock:
            _rotate_if_needed()
            with open(LLM_LOG_FILE, "a", encoding="utf-8") as log_file:
                log_file.write(json.dumps(entry, ensure_ascii=False) + "\n")
    except Exception:
        # Deliberate broad catch: logging must not break the request path.
        pass
46+
47+
48+
# ==================== HTTPX Hooks ====================
49+
50+
51+
def _on_request(request: httpx.Request) -> None:
    """httpx request hook: stage request info for chat-completion calls.

    The staged entry is keyed by ``id(request)``, completed with status and
    latency by ``_on_response``, and finally written out by
    ``log_llm_response`` once the SDK has parsed the body.
    """
    # Only chat-completion traffic is logged.
    if "/chat/completions" not in str(request.url):
        return

    try:
        request_body = json.loads(request.content.decode("utf-8"))
    except (json.JSONDecodeError, UnicodeDecodeError):
        # Keep a best-effort copy when the payload is not valid UTF-8 JSON.
        request_body = {"raw": request.content.decode("utf-8", errors="replace")}

    # Prune stale entries first: requests whose call failed before
    # log_llm_response() ran (SDK exception, retry, non-2xx) would
    # otherwise accumulate in _pending_requests forever.
    now = time.time()
    stale_keys = [
        key
        for key, staged in _pending_requests.items()
        if now - staged.get("start_time", now) > 600  # 10-minute TTL
    ]
    for key in stale_keys:
        _pending_requests.pop(key, None)

    _pending_requests[id(request)] = {
        "start_time": now,
        "url": str(request.url),
        "request": request_body,
    }
66+
67+
68+
def _on_response(response: httpx.Response) -> None:
    """httpx response hook: attach status code and latency to the staged entry."""
    entry = _pending_requests.get(id(response.request))
    if not entry:
        return

    elapsed_seconds = time.time() - entry["start_time"]
    entry["status"] = response.status_code
    entry["duration_ms"] = int(elapsed_seconds * 1000)
77+
78+
79+
# ==================== 公开 API ====================
80+
81+
82+
def create_logging_http_client() -> httpx.Client:
    """Build the httpx client used by the OpenAI SDK, with logging hooks.

    A bare ``httpx.Client`` carries httpx's 5-second default timeout, which
    is far too short for LLM generation and would make long completions fail
    with ReadTimeout. Mirror the openai-python SDK transport defaults
    (600 s total / 5 s connect, its connection limits, redirect following)
    so that swapping in this client does not silently change behavior.

    Returns:
        An ``httpx.Client`` whose request/response event hooks record
        chat-completion traffic for the LLM log.
    """
    return httpx.Client(
        # Same defaults as openai-python's DefaultHttpxClient.
        timeout=httpx.Timeout(600.0, connect=5.0),
        limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100),
        follow_redirects=True,
        event_hooks={
            "request": [_on_request],
            "response": [_on_response],
        },
    )
90+
91+
92+
def log_llm_response(response: Any) -> None:
    """Merge the parsed SDK response with its staged request and persist it.

    Called by the LLM client after the OpenAI SDK has parsed the HTTP
    response, so the full (non-streamed) body can be serialized via
    ``model_dump``.

    NOTE(review): request/response pairing is heuristic — the oldest staged
    entry (FIFO dict order) is assumed to belong to this response. That
    holds when calls complete roughly in order, but concurrent callers may
    get entries cross-matched; exact pairing would need the request id to
    flow through from the hooks.
    """
    if not _pending_requests:
        return

    # Pop the oldest staged entry. pop(key, None) and the StopIteration
    # guard tolerate another thread removing entries concurrently, which
    # would otherwise raise KeyError here.
    try:
        oldest_key = next(iter(_pending_requests))
    except StopIteration:  # emptied between the check and the iteration
        return
    pending = _pending_requests.pop(oldest_key, None)
    if pending is None:
        return

    # Serialize the full response body when the SDK object supports it.
    response_data: Dict[str, Any] = {}
    if response is not None and hasattr(response, "model_dump"):
        response_data = response.model_dump()

    _write_log(
        {
            "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "url": pending.get("url", ""),
            "status": pending.get("status", 0),
            "duration_ms": pending.get("duration_ms", 0),
            "request": pending.get("request", {}),
            "response": response_data,
        }
    )

app/core/subtitle/font_utils.py

Lines changed: 5 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -71,37 +71,28 @@ def get_builtin_fonts() -> tuple[Dict[str, str], ...]:
7171

7272

7373
@lru_cache(maxsize=64)
74-
def _load_font(font_path_or_name: str, size: int) -> FontType:
75-
"""Internal cached font loader"""
76-
return ImageFont.truetype(font_path_or_name, size)
77-
78-
7974
def get_font(size: int, font_name: str = "") -> FontType:
8075
"""Get font object (built-in fonts first, then system fonts)"""
8176
if font_name:
8277
builtin_fonts = get_builtin_fonts()
8378
for builtin in builtin_fonts:
8479
if builtin["name"] == font_name:
8580
try:
86-
font = _load_font(builtin["path"], size)
81+
font = ImageFont.truetype(builtin["path"], size)
8782
logger.debug(f"Loaded built-in font: '{font_name}'")
8883
return font
8984
except Exception as e:
9085
logger.warning(f"Failed to load built-in font: {e}")
9186
break
9287

9388
try:
94-
font = _load_font(font_name, size)
89+
font = ImageFont.truetype(font_name, size)
9590
logger.debug(f"Loaded system font: '{font_name}'")
9691
return font
9792
except (OSError, IOError):
9893
logger.warning(f"Cannot load font '{font_name}', using fallback")
9994

100-
fallback_fonts = []
101-
102-
for builtin in get_builtin_fonts():
103-
fallback_fonts.append(builtin["name"])
104-
95+
fallback_fonts = [f["name"] for f in get_builtin_fonts()]
10596
fallback_fonts.extend(
10697
[
10798
"PingFang SC",
@@ -116,7 +107,7 @@ def get_font(size: int, font_name: str = "") -> FontType:
116107

117108
for fallback in fallback_fonts:
118109
try:
119-
font = _load_font(fallback, size)
110+
font = ImageFont.truetype(fallback, size)
120111
logger.info(f"Using fallback font: '{fallback}'")
121112
return font
122113
except Exception:
@@ -174,5 +165,6 @@ def get_ass_to_pil_ratio(font_name: str) -> float:
174165
def clear_font_cache():
    """Flush every lru_cache used by the font utilities.

    Clears the builtin-font scan, the font loader, and the ASS/PIL ratio
    helper so the next lookup re-reads fonts from disk.
    """
    for cached_fn in (get_builtin_fonts, get_font, get_ass_to_pil_ratio):
        cached_fn.cache_clear()
    logger.info("Font cache cleared")

0 commit comments

Comments
 (0)