Skip to content

Commit 92e7aaa

Browse files
yuneng-berri and claude committed
[Refactor] Extract helper methods in guardrail handlers to fix PLR0915
Ruff flagged `process_output_streaming_response` (A2A) and `process_output_response` (Anthropic) for exceeding 50 statements. Extract inline logic into private helpers to bring both under the limit. Co-Authored-By: Claude Opus 4.6 (1M context) <[email protected]>
1 parent 40d4e79 commit 92e7aaa

File tree

2 files changed

+143
-95
lines changed

2 files changed

+143
-95
lines changed

litellm/llms/a2a/chat/guardrail_translation/handler.py

Lines changed: 43 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -234,37 +234,13 @@ async def process_output_streaming_response(
234234
then the combined guardrailed text is written into the first chunk that had text
235235
and all other text parts in other chunks are cleared (in-place).
236236
"""
237-
from litellm.llms.a2a.common_utils import extract_text_from_a2a_response
238-
239-
# Parse each item; keep alignment with responses_so_far (None where unparseable)
240-
parsed: List[Optional[Dict[str, Any]]] = [None] * len(responses_so_far)
241-
for i, item in enumerate(responses_so_far):
242-
if isinstance(item, dict):
243-
obj = item
244-
elif isinstance(item, str):
245-
try:
246-
obj = json.loads(item.strip())
247-
except (json.JSONDecodeError, TypeError):
248-
continue
249-
else:
250-
continue
251-
if isinstance(obj.get("result"), dict):
252-
parsed[i] = obj
253-
254-
valid_parsed = [(i, obj) for i, obj in enumerate(parsed) if obj is not None]
237+
parsed, valid_parsed = self._parse_streaming_responses(responses_so_far)
255238
if not valid_parsed:
256239
return responses_so_far
257240

258-
# Collect text from each chunk in order (by original index in responses_so_far)
259-
text_parts: List[str] = []
260-
chunk_indices_with_text: List[int] = [] # indices into valid_parsed
261-
for idx, (orig_i, obj) in enumerate(valid_parsed):
262-
t = extract_text_from_a2a_response(obj)
263-
if t:
264-
text_parts.append(t)
265-
chunk_indices_with_text.append(orig_i)
266-
267-
combined_text = "".join(text_parts)
241+
combined_text, chunk_indices_with_text = (
242+
self._collect_text_from_parsed_chunks(valid_parsed)
243+
)
268244
if not combined_text:
269245
return responses_so_far
270246

@@ -337,6 +313,45 @@ async def process_output_streaming_response(
337313

338314
return responses_so_far
339315

316+
def _parse_streaming_responses(
317+
self,
318+
responses_so_far: List[Any],
319+
) -> Tuple[
320+
List[Optional[Dict[str, Any]]], List[Tuple[int, Dict[str, Any]]]
321+
]:
322+
"""Parse JSON-RPC items, returning aligned parsed list and valid entries."""
323+
parsed: List[Optional[Dict[str, Any]]] = [None] * len(responses_so_far)
324+
for i, item in enumerate(responses_so_far):
325+
if isinstance(item, dict):
326+
obj = item
327+
elif isinstance(item, str):
328+
try:
329+
obj = json.loads(item.strip())
330+
except (json.JSONDecodeError, TypeError):
331+
continue
332+
else:
333+
continue
334+
if isinstance(obj.get("result"), dict):
335+
parsed[i] = obj
336+
valid_parsed = [(i, obj) for i, obj in enumerate(parsed) if obj is not None]
337+
return parsed, valid_parsed
338+
339+
def _collect_text_from_parsed_chunks(
    self,
    valid_parsed: List[Tuple[int, Dict[str, Any]]],
) -> Tuple[str, List[int]]:
    """Collect text from parsed chunks.

    Args:
        valid_parsed: ``(original_index, parsed_object)`` pairs for every
            chunk that parsed successfully.

    Returns:
        The concatenated text of all chunks that contained any text, and
        the original indices of exactly those chunks.
    """
    # Function-scope import — presumably avoids an import cycle; confirm
    # against the module layout before hoisting to the top of the file.
    from litellm.llms.a2a.common_utils import extract_text_from_a2a_response

    text_parts: List[str] = []
    chunk_indices_with_text: List[int] = []
    # Iterate the pairs directly; the enumerate() wrapper in the original
    # produced an unused index (leftover from the pre-refactor inline loop).
    for orig_i, obj in valid_parsed:
        t = extract_text_from_a2a_response(obj)
        if t:
            text_parts.append(t)
            chunk_indices_with_text.append(orig_i)
    return "".join(text_parts), chunk_indices_with_text
354+
340355
def _extract_texts_from_result(
341356
self,
342357
result: Dict[str, Any],

litellm/llms/anthropic/chat/guardrail_translation/handler.py

Lines changed: 100 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -277,82 +277,26 @@ async def process_output_response(
277277
images_to_check: List[str] = []
278278
tool_calls_to_check: List[ChatCompletionToolCallChunk] = []
279279
task_mappings: List[Tuple[int, Optional[int]]] = []
280-
# Track (content_index, None) for each text
281-
282-
# Handle both dict and object responses
283-
response_content: List[Any] = []
284-
if isinstance(response, dict):
285-
response_content = response.get("content", []) or []
286-
elif hasattr(response, "content"):
287-
content = getattr(response, "content", None)
288-
response_content = content or []
289-
else:
290-
response_content = []
291280

281+
response_content = self._get_response_content(response)
292282
if not response_content:
293283
return response
294284

295285
# Step 1: Extract all text content and tool calls from response
296-
for content_idx, content_block in enumerate(response_content):
297-
# Handle both dict and Pydantic object content blocks
298-
block_dict: Dict[str, Any] = {}
299-
if isinstance(content_block, dict):
300-
block_type = content_block.get("type")
301-
block_dict = cast(Dict[str, Any], content_block)
302-
elif hasattr(content_block, "type"):
303-
block_type = getattr(content_block, "type", None)
304-
# Convert Pydantic object to dict for processing
305-
if hasattr(content_block, "model_dump"):
306-
block_dict = content_block.model_dump()
307-
else:
308-
block_dict = {
309-
"type": block_type,
310-
"text": getattr(content_block, "text", None),
311-
}
312-
else:
313-
continue
314-
315-
if block_type in ["text", "tool_use"]:
316-
self._extract_output_text_and_images(
317-
content_block=block_dict,
318-
content_idx=content_idx,
319-
texts_to_check=texts_to_check,
320-
images_to_check=images_to_check,
321-
task_mappings=task_mappings,
322-
tool_calls_to_check=tool_calls_to_check,
323-
)
286+
self._extract_from_content_blocks(
287+
response_content, texts_to_check, images_to_check,
288+
task_mappings, tool_calls_to_check,
289+
)
324290

325291
# Step 2: Apply guardrail to all texts in batch
326292
if texts_to_check or tool_calls_to_check:
327-
# Use the real request_data if provided (proxy path), otherwise
328-
# create a standalone dict (SDK / direct-call path).
329-
if request_data is None:
330-
request_data = {"response": response}
331-
else:
332-
if "response" not in request_data:
333-
request_data["response"] = response
334-
335-
# Add user API key metadata with prefixed keys
336-
if "litellm_metadata" not in request_data:
337-
user_metadata = self.transform_user_api_key_dict_to_metadata(
338-
user_api_key_dict
339-
)
340-
if user_metadata:
341-
request_data["litellm_metadata"] = user_metadata
293+
request_data = self._prepare_request_data(
294+
request_data, response, user_api_key_dict, key="response",
295+
)
342296

343-
inputs = GenericGuardrailAPIInputs(texts=texts_to_check)
344-
if images_to_check:
345-
inputs["images"] = images_to_check
346-
if tool_calls_to_check:
347-
inputs["tool_calls"] = tool_calls_to_check
348-
# Include model information from the response if available
349-
response_model = None
350-
if isinstance(response, dict):
351-
response_model = response.get("model")
352-
elif hasattr(response, "model"):
353-
response_model = getattr(response, "model", None)
354-
if response_model:
355-
inputs["model"] = response_model
297+
inputs = self._build_guardrail_inputs(
298+
texts_to_check, images_to_check, tool_calls_to_check, response,
299+
)
356300

357301
guardrailed_inputs = await guardrail_to_apply.apply_guardrail(
358302
inputs=inputs,
@@ -440,6 +384,95 @@ async def process_output_streaming_response(
440384
)
441385
return responses_so_far
442386

387+
def _prepare_request_data(
388+
self,
389+
request_data: Optional[dict],
390+
response: Any,
391+
user_api_key_dict: Optional[Any],
392+
key: str,
393+
) -> dict:
394+
"""Ensure request_data has the response/responses_so_far key and metadata."""
395+
if request_data is None:
396+
request_data = {key: response}
397+
else:
398+
if key not in request_data:
399+
request_data[key] = response
400+
401+
if "litellm_metadata" not in request_data:
402+
user_metadata = self.transform_user_api_key_dict_to_metadata(
403+
user_api_key_dict
404+
)
405+
if user_metadata:
406+
request_data["litellm_metadata"] = user_metadata
407+
return request_data
408+
409+
@staticmethod
410+
def _get_response_content(response: Any) -> List[Any]:
411+
"""Extract content list from a dict or object response."""
412+
if isinstance(response, dict):
413+
return response.get("content", []) or []
414+
elif hasattr(response, "content"):
415+
return getattr(response, "content", None) or []
416+
return []
417+
418+
def _extract_from_content_blocks(
419+
self,
420+
response_content: List[Any],
421+
texts_to_check: List[str],
422+
images_to_check: List[str],
423+
task_mappings: List[Tuple[int, Optional[int]]],
424+
tool_calls_to_check: List["ChatCompletionToolCallChunk"],
425+
) -> None:
426+
"""Extract text, images, and tool calls from content blocks."""
427+
for content_idx, content_block in enumerate(response_content):
428+
block_dict: Dict[str, Any] = {}
429+
if isinstance(content_block, dict):
430+
block_type = content_block.get("type")
431+
block_dict = cast(Dict[str, Any], content_block)
432+
elif hasattr(content_block, "type"):
433+
block_type = getattr(content_block, "type", None)
434+
if hasattr(content_block, "model_dump"):
435+
block_dict = content_block.model_dump()
436+
else:
437+
block_dict = {
438+
"type": block_type,
439+
"text": getattr(content_block, "text", None),
440+
}
441+
else:
442+
continue
443+
444+
if block_type in ["text", "tool_use"]:
445+
self._extract_output_text_and_images(
446+
content_block=block_dict,
447+
content_idx=content_idx,
448+
texts_to_check=texts_to_check,
449+
images_to_check=images_to_check,
450+
task_mappings=task_mappings,
451+
tool_calls_to_check=tool_calls_to_check,
452+
)
453+
454+
@staticmethod
def _build_guardrail_inputs(
    texts_to_check: List[str],
    images_to_check: List[str],
    tool_calls_to_check: List["ChatCompletionToolCallChunk"],
    response: Any,
) -> "GenericGuardrailAPIInputs":
    """Assemble guardrail API inputs from the collected texts.

    Images, tool calls, and the response's model name are attached only
    when present, matching the optional keys of GenericGuardrailAPIInputs.
    """
    inputs = GenericGuardrailAPIInputs(texts=texts_to_check)
    if images_to_check:
        inputs["images"] = images_to_check
    if tool_calls_to_check:
        inputs["tool_calls"] = tool_calls_to_check

    # Propagate the model name from either a dict or object response.
    if isinstance(response, dict):
        model_name = response.get("model")
    else:
        model_name = getattr(response, "model", None)
    if model_name:
        inputs["model"] = model_name
    return inputs
475+
443476
def get_streaming_string_so_far(self, responses_so_far: List[Any]) -> str:
444477
"""
445478
Parse streaming responses and extract accumulated text content.

0 commit comments

Comments
 (0)