11import { type GroqModelId , groqDefaultModelId , groqModels } from "@roo-code/types"
2- import { Anthropic } from "@anthropic-ai/sdk"
3- import OpenAI from "openai"
42
53import type { ApiHandlerOptions } from "../../shared/api"
6- import type { ApiHandlerCreateMessageMetadata } from "../index"
7- import { ApiStream } from "../transform/stream"
8- import { convertToOpenAiMessages } from "../transform/openai-format"
9- import { calculateApiCostOpenAI } from "../../shared/cost"
104
115import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"
126
13- // Enhanced usage interface to support Groq's cached token fields
14- interface GroqUsage extends OpenAI . CompletionUsage {
15- prompt_tokens_details ?: {
16- cached_tokens ?: number
17- }
18- }
19-
207export class GroqHandler extends BaseOpenAiCompatibleProvider < GroqModelId > {
218 constructor ( options : ApiHandlerOptions ) {
229 super ( {
@@ -29,50 +16,4 @@ export class GroqHandler extends BaseOpenAiCompatibleProvider<GroqModelId> {
2916 defaultTemperature : 0.5 ,
3017 } )
3118 }
32-
33- override async * createMessage (
34- systemPrompt : string ,
35- messages : Anthropic . Messages . MessageParam [ ] ,
36- metadata ?: ApiHandlerCreateMessageMetadata ,
37- ) : ApiStream {
38- const stream = await this . createStream ( systemPrompt , messages , metadata )
39-
40- for await ( const chunk of stream ) {
41- const delta = chunk . choices [ 0 ] ?. delta
42-
43- if ( delta ?. content ) {
44- yield {
45- type : "text" ,
46- text : delta . content ,
47- }
48- }
49-
50- if ( chunk . usage ) {
51- yield * this . yieldUsage ( chunk . usage as GroqUsage )
52- }
53- }
54- }
55-
56- private async * yieldUsage ( usage : GroqUsage | undefined ) : ApiStream {
57- const { info } = this . getModel ( )
58- const inputTokens = usage ?. prompt_tokens || 0
59- const outputTokens = usage ?. completion_tokens || 0
60-
61- const cacheReadTokens = usage ?. prompt_tokens_details ?. cached_tokens || 0
62-
63- // Groq does not track cache writes
64- const cacheWriteTokens = 0
65-
66- // Calculate cost using OpenAI-compatible cost calculation
67- const { totalCost } = calculateApiCostOpenAI ( info , inputTokens , outputTokens , cacheWriteTokens , cacheReadTokens )
68-
69- yield {
70- type : "usage" ,
71- inputTokens,
72- outputTokens,
73- cacheWriteTokens,
74- cacheReadTokens,
75- totalCost,
76- }
77- }
7819}