
Commit bee40f5

remove unnecessary else

Signed-off-by: Kush Gupta <[email protected]>

1 parent 3f29dcc

File tree

1 file changed, 8 insertions(+), 8 deletions(-)

ramalama/model.py

Lines changed: 8 additions & 8 deletions
@@ -616,14 +616,14 @@ def build_exec_args_bench(self, args, model_path):
 
         if getattr(args, "runtime", None) == "mlx":
             raise NotImplementedError("Benchmarking is not supported by the MLX runtime.")
-        else:
-            # Default llama.cpp benchmarking
-            exec_args = ["llama-bench"]
-            set_accel_env_vars()
-            gpu_args = self.gpu_args(args=args)
-            if gpu_args is not None:
-                exec_args.extend(gpu_args)
-            exec_args += ["-m", exec_model_path]
+
+        # Default llama.cpp benchmarking
+        exec_args = ["llama-bench"]
+        set_accel_env_vars()
+        gpu_args = self.gpu_args(args=args)
+        if gpu_args is not None:
+            exec_args.extend(gpu_args)
+        exec_args += ["-m", exec_model_path]
 
         return exec_args
 
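For context on why the `else:` is unnecessary: `raise` unconditionally exits the function, so the code in the `else:` branch was already only reachable when the guard did not fire, and it can be dedented one level with identical behavior. A minimal standalone sketch of the same guard-clause pattern; the function name and arguments below are illustrative and not taken from ramalama/model.py:

```python
def build_bench_args(runtime: str, model_path: str) -> list[str]:
    """Sketch of the guard-clause refactor applied in this commit."""
    # Guard clause: raising exits the function immediately, so the
    # code below does not need to sit inside an else branch.
    if runtime == "mlx":
        raise NotImplementedError("Benchmarking is not supported by the MLX runtime.")

    # Only reachable when the guard did not raise; behaviorally
    # identical to the old else body, one indent level shallower.
    return ["llama-bench", "-m", model_path]


print(build_bench_args("llama.cpp", "model.gguf"))  # ['llama-bench', '-m', 'model.gguf']
```

This early-return/early-raise style keeps the main code path at the top indentation level, which is why linters commonly flag `else` after `raise` or `return` as redundant.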
