Skip to content

Commit bd25154

Browse files
didier-durand and 2015aroras
authored and committed
[Doc]: fix various typos in multiple files (vllm-project#23179)
Signed-off-by: Didier Durand <[email protected]>
1 parent 794ba82 commit bd25154

File tree

7 files changed

+12
-12
lines changed

7 files changed

+12
-12
lines changed

vllm/beam_search.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ class BeamSearchSequence:
1818
The text field is optional and will only be filled when the sequence is
1919
about to be returned to the user.
2020
"""
21-
# The tokens includes the prompt.
21+
# The tokens include the prompt.
2222
tokens: list[int]
2323
logprobs: list[dict[int, Logprob]]
2424
lora_request: Optional[LoRARequest] = None

vllm/compilation/backends.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -484,7 +484,7 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable:
484484

485485
factors = []
486486
# 0. factors come from the env, for example, The values of
487-
# VLLM_PP_LAYER_PARTITION will affects the computation graph.
487+
# VLLM_PP_LAYER_PARTITION will affect the computation graph.
488488
env_hash = envs.compute_hash()
489489
factors.append(env_hash)
490490

vllm/engine/arg_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -605,7 +605,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
605605
**guided_decoding_kwargs["disable_additional_properties"])
606606
guided_decoding_group.add_argument(
607607
"--reasoning-parser",
608-
# This choices is a special case because it's not static
608+
# This choice is a special case because it's not static
609609
choices=list(ReasoningParserManager.reasoning_parsers),
610610
**guided_decoding_kwargs["reasoning_backend"])
611611

@@ -1047,7 +1047,7 @@ def create_speculative_config(
10471047
# details from the config directly
10481048
# no user input required / expected
10491049
if isinstance(hf_config, SpeculatorsConfig):
1050-
# We create one since we dont create one
1050+
# We create one since we don't create one
10511051
self.speculative_config = {}
10521052
self.speculative_config[
10531053
"num_speculative_tokens"] = hf_config.num_lookahead_tokens
@@ -1775,7 +1775,7 @@ def disable_log_requests(self, value: bool):
17751775
def add_cli_args(parser: FlexibleArgumentParser,
17761776
async_args_only: bool = False) -> FlexibleArgumentParser:
17771777
# Initialize plugin to update the parser, for example, The plugin may
1778-
# adding a new kind of quantization method to --quantization argument or
1778+
# add a new kind of quantization method to --quantization argument or
17791779
# a new device to --device argument.
17801780
load_general_plugins()
17811781
if not async_args_only:

vllm/engine/multiprocessing/client.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -539,7 +539,7 @@ async def _process_request(
539539
if request_id in self.output_queues:
540540
raise ValueError(f"Request {request_id} already exists")
541541

542-
# 1) Create output queue for this requests.
542+
# 1) Create output queue for this request.
543543
queue: asyncio.Queue[Union[RequestOutput,
544544
BaseException]] = asyncio.Queue()
545545
self.output_queues[request_id] = queue
@@ -651,7 +651,7 @@ async def add_lora(self, lora_request: LoRARequest) -> None:
651651
# Uses the same I/O as generate requests
652652
request = RPCLoadAdapterRequest(lora_request)
653653

654-
# Create output queue for this requests.
654+
# Create output queue for this request.
655655
queue: asyncio.Queue[Union[None, BaseException]] = asyncio.Queue()
656656
self.output_queues[request.request_id] = queue
657657

vllm/entrypoints/chat_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1330,7 +1330,7 @@ def apply_mistral_chat_template(
13301330
# mistral-common uses assert statements to stop processing of input
13311331
# if input does not comply with the expected format.
13321332
# We convert those assertion errors to ValueErrors so they can be
1333-
# are properly caught in the preprocessing_input step
1333+
# properly caught in the preprocessing_input step
13341334
except (AssertionError, MistralCommonException) as e:
13351335
raise ValueError(str(e)) from e
13361336

vllm/utils/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2482,7 +2482,7 @@ class PlaceholderModule(_PlaceholderBase):
24822482
A placeholder object to use when a module does not exist.
24832483
24842484
This enables more informative errors when trying to access attributes
2485-
of a module that does not exists.
2485+
of a module that does not exist.
24862486
"""
24872487

24882488
def __init__(self, name: str) -> None:
@@ -3109,7 +3109,7 @@ class LazyLoader(types.ModuleType):
31093109
"""
31103110
LazyLoader module borrowed from Tensorflow
31113111
https://github.com/tensorflow/tensorflow/blob/main/tensorflow/python/util/lazy_loader.py
3112-
with a addition of "module caching".
3112+
with an addition of "module caching".
31133113
31143114
Lazily import a module, mainly to avoid pulling in large dependencies.
31153115
Modules such as `xgrammar` might do additional side effects, so we

vllm/v1/structured_output/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -267,7 +267,7 @@ def should_advance(self, request: Request) -> bool:
267267
assert request.structured_output_request is not None
268268
assert request.structured_output_request.grammar is not None
269269
# by default, we should always advance
270-
# for cases that doesn't uses thinking mode.
270+
# for cases that don't use thinking mode.
271271
if self.reasoner is not None:
272272
structured_req = request.structured_output_request
273273

@@ -276,7 +276,7 @@ def should_advance(self, request: Request) -> bool:
276276

277277
# Check if reasoning ends in *this* step
278278
if self.reasoner.is_reasoning_end(request.all_token_ids):
279-
# Reasoning just ended, so we shouldn't advanced til
279+
# Reasoning just ended, so we shouldn't advance til
280280
# next pass
281281
structured_req.reasoning_ended = True
282282

0 commit comments

Comments (0)