import { LEPTON } from '../../globals';
import {
  ChatCompletionResponse,
  ErrorResponse,
  ProviderConfig,
} from '../types';
import { OpenAIErrorResponseTransform } from '../openai/utils';

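// Lepton chat completion responses follow the shared OpenAI-compatible shape,
// so the common response type is reused without additions.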
interface LeptonChatCompleteResponse extends ChatCompletionResponse {}

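/**
 * Maps OpenAI-style request parameters onto Lepton's chat completion API,
 * including Lepton-specific options such as DRY/beam-search sampling controls,
 * top_k/min_p, and text-to-speech output settings.
 */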
export const LeptonChatCompleteConfig: ProviderConfig = {
  model: {
    param: 'model',
    required: true,
  },
  messages: {
    param: 'messages',
    required: true,
  },
  temperature: {
    param: 'temperature',
    default: 0.7,
    min: 0,
    max: 2,
  },
  top_p: {
    param: 'top_p',
    default: 1,
    min: 0,
    max: 1,
  },
  n: {
    param: 'n',
    default: 1,
  },
  max_tokens: {
    param: 'max_tokens',
    default: 256,
    min: 0,
  },
  stop: {
    param: 'stop',
  },
  stream: {
    param: 'stream',
    default: false,
  },
  stream_options: {
    param: 'stream_options',
  },
  presence_penalty: {
    param: 'presence_penalty',
    default: 0,
    min: -2,
    max: 2,
  },
  frequency_penalty: {
    param: 'frequency_penalty',
    default: 0,
    min: -2,
    max: 2,
  },
  logit_bias: {
    param: 'logit_bias',
  },
  user: {
    param: 'user',
  },
  tools: {
    param: 'tools',
  },
  seed: {
    param: 'seed',
  },
  logprobs: {
    param: 'logprobs',
    default: false,
  },
  top_logprobs: {
    param: 'top_logprobs',
    default: 0,
  },
  chat_template_kwargs: {
    param: 'chat_template_kwargs',
  },
  length_penalty: {
    param: 'length_penalty',
    default: 1,
  },
  repetition_penalty: {
    param: 'repetition_penalty',
    default: 1,
  },
  dry_multiplier: {
    param: 'dry_multiplier',
    default: 0,
  },
  dry_base: {
    param: 'dry_base',
    default: 1.75,
  },
  dry_allowed_length: {
    param: 'dry_allowed_length',
    default: 2,
  },
  do_early_stopping: {
    param: 'do_early_stopping',
    default: false,
  },
  beam_size: {
    param: 'beam_size',
    default: 1,
  },
  top_k: {
    param: 'top_k',
    default: 50,
  },
  min_p: {
    param: 'min_p',
    default: 0,
  },
  id: {
    param: 'id',
  },
  require_audio: {
    param: 'require_audio',
    default: false,
  },
  tts_preset_id: {
    param: 'tts_preset_id',
    default: 'jessica',
  },
  tts_audio_format: {
    param: 'tts_audio_format',
    default: 'mp3',
  },
  tts_audio_bitrate: {
    param: 'tts_audio_bitrate',
    default: 64,
  },
  audio_history_b64: {
    param: 'audio_history_b64',
  },
  for_quant_calibration: {
    param: 'for_quant_calibration',
    default: false,
  },
};

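// Shape of a single chunk in Lepton's streaming (server-sent events) chat completion response.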
interface LeptonStreamChunk {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: {
    delta: {
      role?: string | null;
      content?: string;
    };
    index: number;
    finish_reason: string | null;
  }[];
}

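/**
 * Normalizes a non-streaming Lepton response: error payloads are routed through
 * the shared OpenAI error transform, and successful payloads are tagged with the
 * provider name before being returned.
 */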
export const LeptonChatCompleteResponseTransform: (
  response: LeptonChatCompleteResponse | ErrorResponse,
  responseStatus: number
) => ChatCompletionResponse | ErrorResponse = (response, responseStatus) => {
  if (responseStatus !== 200 && 'error' in response) {
    return OpenAIErrorResponseTransform(response, LEPTON);
  }

  Object.defineProperty(response, 'provider', {
    value: LEPTON,
    enumerable: true,
  });

  return response;
};

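/**
 * Re-serializes each SSE chunk from Lepton into the gateway's unified stream
 * format, tagging it with the provider name. The terminal `[DONE]` marker is
 * passed through unchanged, and chunks that fail to parse are forwarded as-is.
 */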
export const LeptonChatCompleteStreamChunkTransform = (
  responseChunk: string
) => {
  let chunk = responseChunk.trim();
  chunk = chunk.replace(/^data: /, '');
  chunk = chunk.trim();

  if (chunk === '[DONE]') {
    return `data: ${chunk}\n\n`;
  }

  try {
    const parsedChunk: LeptonStreamChunk = JSON.parse(chunk);
    return (
      `data: ${JSON.stringify({
        id: parsedChunk.id,
        object: parsedChunk.object,
        created: parsedChunk.created,
        model: parsedChunk.model,
        provider: LEPTON,
        choices: [
          {
            index: parsedChunk.choices[0].index,
            delta: parsedChunk.choices[0].delta,
            finish_reason: parsedChunk.choices[0].finish_reason,
          },
        ],
      })}` + '\n\n'
    );
  } catch (error) {
    console.error('Error parsing Lepton stream chunk:', error);
    return `data: ${chunk}\n\n`;
  }
};