We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f9e0a15 · commit f12bb75 (Copy full SHA for f12bb75)
outlines/models/llamacpp.py
@@ -197,7 +197,7 @@ def format_chat_input(self, model_input: Chat) -> list:
197
198
def format_output_type(
199
self, output_type: Optional[OutlinesLogitsProcessor] = None,
200
- ) -> "LogitsProcessorList":
+ ) -> Optional["LogitsProcessorList"]:
201
"""Generate the logits processor argument to pass to the model.
202
203
Parameters
@@ -213,7 +213,9 @@ def format_output_type(
213
"""
214
from llama_cpp import LogitsProcessorList
215
216
- return LogitsProcessorList([output_type])
+ if output_type is not None:
217
+ return LogitsProcessorList([output_type])
218
+ return None
219
220
221
class LlamaCpp(Model):
0 commit comments