
Commit 61efb04

Merge pull request #1605 from rhatdan/chat
Switchout hasattr for getattr wherever possible
2 parents 9140476 + 932a1d8 commit 61efb04
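
The whole diff applies one mechanical pattern: guards written as hasattr(obj, "x") and obj.x collapse into a single getattr(obj, "x", default) lookup, with the default chosen so the surrounding test behaves as before. A minimal sketch of the pattern, using a stand-in argparse.Namespace rather than RamaLama's real CLI object:

# Minimal sketch of the refactor this PR applies throughout the tree.
# "args" is a stand-in argparse.Namespace, not RamaLama's real CLI args.
from argparse import Namespace

args = Namespace(network="bridge")
exec_args = []

# Before: probe for the attribute, then test that it is truthy.
if hasattr(args, "network") and args.network:
    exec_args += ["--network", args.network]

# After: getattr returns the fallback when the attribute is missing,
# so a single truthiness test covers both "absent" and "set but falsy".
if getattr(args, "network", None):
    exec_args += ["--network", args.network]

print(exec_args)  # ['--network', 'bridge', '--network', 'bridge']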

File tree: 10 files changed, +55 -55 lines changed


.github/workflows/ci.yml

Lines changed: 4 additions & 0 deletions
@@ -207,6 +207,10 @@ jobs:
           sudo mv /tmp/daemon.json /etc/docker/daemon.json
           cat /etc/docker/daemon.json
           sudo systemctl restart docker.service
+          sudo mkdir -m a=rwx -p /mnt/tmp /mnt/runner
+          sudo mkdir -m o=rwx -p /home/runner/.local
+          sudo chown runner:runner /mnt/runner /home/runner/.local
+          sudo mount --bind /mnt/runner /home/runner/.local
           df -h

       - name: Build a container for CPU inferencing

ramalama/cli.py

Lines changed: 1 addition & 1 deletion
@@ -262,7 +262,7 @@ def parse_arguments(parser):

 def post_parse_setup(args):
     """Perform additional setup after parsing arguments."""
-    if hasattr(args, "MODEL") and args.subcommand != "rm":
+    if getattr(args, "MODEL", None) and args.subcommand != "rm":
         resolved_model = shortnames.resolve(args.MODEL)
         if resolved_model:
             args.UNRESOLVED_MODEL = args.MODEL
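
One nuance in this hunk: hasattr(args, "MODEL") is true even when the attribute holds an empty value, while the getattr form is falsy for one, so the new guard also skips a present-but-empty MODEL. A sketch with an illustrative Namespace, not ramalama code:

from argparse import Namespace

# MODEL exists but is empty: the old and new guards disagree here,
# which looks like a deliberate tightening (skip resolving empty values).
args = Namespace(MODEL="", subcommand="run")

assert hasattr(args, "MODEL") and args.subcommand != "rm"               # old: would resolve
assert not (getattr(args, "MODEL", None) and args.subcommand != "rm")   # new: skips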

ramalama/engine.py

Lines changed: 19 additions & 24 deletions
@@ -57,11 +57,11 @@ def add_pull_newer(self):
             self.exec_args += ["--pull", self.args.pull]

     def add_network(self):
-        if hasattr(self.args, "network") and self.args.network:
+        if getattr(self.args, "network", None):
             self.exec_args += ["--network", self.args.network]

     def add_oci_runtime(self):
-        if hasattr(self.args, "oci_runtime") and self.args.oci_runtime:
+        if getattr(self.args, "oci_runtime", None):
             self.exec_args += ["--runtime", self.args.oci_runtime]
             return
         if check_nvidia() == "cuda":
@@ -71,13 +71,13 @@ def add_oci_runtime(self):
             self.exec_args += ["--runtime", "/usr/bin/nvidia-container-runtime"]

     def add_privileged_options(self):
-        if hasattr(self.args, "privileged") and self.args.privileged:
+        if getattr(self.args, "privileged", False):
             self.exec_args += ["--privileged"]
         else:
             self.exec_args += [
                 "--security-opt=label=disable",
             ]
-            if not hasattr(self.args, "nocapdrop"):
+            if not getattr(self.args, "nocapdrop", False):
                 self.exec_args += [
                     "--cap-drop=all",
                     "--security-opt=no-new-privileges",
@@ -89,12 +89,9 @@ def cap_add(self, cap):
     def use_tty(self):
         if not sys.stdin.isatty():
             return False
-        if not (hasattr(self.args, "ARGS") and self.args.ARGS):
+        if getattr(self.args, "ARGS", None):
             return False
-        if not (hasattr(self.args, "subcommand") and self.args.subcommand == "run"):
-            return False
-
-        return True
+        return getattr(self.args, "subcommand", "") != "run"

     def add_subcommand_env(self):
         if EMOJI and self.use_tty():
@@ -104,20 +101,19 @@ def add_subcommand_env(self):
                 self.exec_args += ["--env", "LLAMA_PROMPT_PREFIX=🐋 > "]

     def add_env_option(self):
-        if hasattr(self.args, "env"):
-            for env in self.args.env:
-                self.exec_args += ["--env", env]
+        for env in getattr(self.args, "env", []):
+            self.exec_args += ["--env", env]

     def add_tty_option(self):
         if self.use_tty():
             self.exec_args += ["-t"]

     def add_detach_option(self):
-        if hasattr(self.args, "detach") and self.args.detach is True:
+        if getattr(self.args, "detach", False):
             self.exec_args += ["-d"]

     def add_port_option(self):
-        if not hasattr(self.args, "port") or not self.args.port or self.args.port == "":
+        if getattr(self.args, "port", "") == "":
             return

         if self.args.port.count(":") > 0:
@@ -126,7 +122,7 @@ def add_port_option(self):
             self.exec_args += ["-p", f"{self.args.port}:{self.args.port}"]

     def add_device_options(self):
-        if hasattr(self.args, "device") and self.args.device:
+        if getattr(self.args, "device", None):
             for device_arg in self.args.device:
                 self.exec_args += ["--device", device_arg]

@@ -151,7 +147,7 @@ def add_device_options(self):
             self.exec_args += ["-e", f"{k}={v}"]

     def add_rag(self):
-        if not hasattr(self.args, "rag") or not self.args.rag:
+        if not getattr(self.args, "rag", None):
             return

         if os.path.exists(self.args.rag):
@@ -162,7 +158,7 @@ def add_rag(self):
             self.exec_args.append(f"--mount=type=image,source={self.args.rag},destination=/rag,rw=true")

     def handle_podman_specifics(self):
-        if hasattr(self.args, "podman_keep_groups") and self.args.podman_keep_groups:
+        if getattr(self.args, "podman_keep_groups", None):
             self.exec_args += ["--group-add", "keep-groups"]

     def add(self, newargs):
@@ -195,10 +191,10 @@ def images(args):
         raise ValueError("no container manager (Podman, Docker) found")

     conman_args = [conman, "images"]
-    if hasattr(args, "noheading") and args.noheading:
+    if getattr(args, "noheading", False):
         conman_args += ["--noheading"]

-    if hasattr(args, "notrunc") and args.notrunc:
+    if getattr(args, "notrunc", False):
         conman_args += ["--no-trunc"]

     if args.format:
@@ -220,10 +216,10 @@ def containers(args):
         raise ValueError("no container manager (Podman, Docker) found")

     conman_args = [conman, "ps", "-a", "--filter", "label=ai.ramalama"]
-    if hasattr(args, "noheading") and args.noheading:
+    if getattr(args, "noheading", False):
         conman_args += ["--noheading"]

-    if hasattr(args, "notrunc") and args.notrunc:
+    if getattr(args, "notrunc", False):
         conman_args += ["--no-trunc"]

     if args.format:
@@ -332,6 +328,5 @@ def add_labels(args, add_label):
         "subcommand": "ai.ramalama.command",
     }
     for arg, label_prefix in label_map.items():
-        if hasattr(args, arg):
-            if value := getattr(args, arg):
-                add_label(f"{label_prefix}={value}")
+        if value := getattr(args, arg, None):
+            add_label(f"{label_prefix}={value}")
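
The add_labels hunk goes one step further, folding the fetch, the binding, and the truthiness test into a single walrus-operator guard. A standalone sketch, with a hypothetical label_map and label list shaped like the real ones rather than the actual ramalama code:

from argparse import Namespace

args = Namespace(MODEL="tinyllama", port="8080", name=None)
label_map = {"MODEL": "ai.ramalama.model", "port": "ai.ramalama.port"}
labels = []

for arg, label_prefix in label_map.items():
    # Fetch (None when absent), bind with :=, and skip falsy values --
    # one expression replacing hasattr plus a nested getattr and if.
    if value := getattr(args, arg, None):
        labels.append(f"{label_prefix}={value}")

print(labels)  # ['ai.ramalama.model=tinyllama', 'ai.ramalama.port=8080']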

ramalama/kube.py

Lines changed: 2 additions & 4 deletions
@@ -7,11 +7,9 @@

 class Kube:
     def __init__(self, model, chat_template, args, exec_args):
-        self.ai_image = model
-        if hasattr(args, "MODEL"):
-            self.ai_image = args.MODEL
+        self.ai_image = getattr(args, "MODEL", model)
         self.ai_image = self.ai_image.removeprefix("oci://")
-        if hasattr(args, "name") and args.name:
+        if getattr(args, "name", None):
             self.name = args.name
         else:
             self.name = genname()

ramalama/model.py

Lines changed: 20 additions & 16 deletions
@@ -197,18 +197,18 @@ def remove(self, args):
         return

     def get_container_name(self, args):
-        if hasattr(args, "name") and args.name:
+        if getattr(args, "name", None):
             return args.name

         return genname()

     def base(self, args, name):
         # force accel_image to use -rag version. Drop TAG if it exists
         # so that accel_image will add -rag to the image specification.
-        if args.image == self.default_image and (hasattr(args, "rag") and args.rag):
+        if args.image == self.default_image and getattr(args, "rag", None):
             args.image = rag_image(args.image)
         self.engine = Engine(args)
-        if args.subcommand == "run" and not (hasattr(args, "ARGS") and args.ARGS) and sys.stdin.isatty():
+        if args.subcommand == "run" and not getattr(args, "ARGS", None) and sys.stdin.isatty():
             self.engine.add(["-i"])

         self.engine.add(
@@ -236,7 +236,7 @@ def add_oci_runtime(self, conman_args, args):
         return conman_args

     def add_rag(self, exec_args, args):
-        if not hasattr(args, "rag") or not args.rag:
+        if not getattr(args, "rag", None):
             return exec_args

         if os.path.exists(args.rag):
@@ -290,8 +290,8 @@ def exec_model_in_container(self, model_path, cmd_args, args):
     def setup_mounts(self, model_path, args):
         if args.runtime == "vllm":
             model_base = ""
-            if self.model_store and hasattr(self, 'model_tag'):
-                ref_file = self.model_store.get_ref_file(self.model_tag)
+            if self.model_store and getattr(self, 'model_tag', None):
+                ref_file = self.store.get_ref_file(self.model_tag)
                 if ref_file and hasattr(ref_file, 'hash'):
                     model_base = self.model_store.model_base_directory
             if not model_base and (model_path and os.path.exists(model_path)):
@@ -457,18 +457,22 @@ def build_exec_args_bench(self, args, model_path):
         return exec_args

     def validate_args(self, args):
+        # If --container was specified return valid
         if args.container:
             return
         if args.privileged:
             raise KeyError(
                 "--nocontainer and --privileged options conflict. The --privileged option requires a container."
             )
-        if hasattr(args, "name") and args.name:
-            if hasattr(args, "generate"):
-                # Do not fail on serve if user specified --generate
-                if args.generate:
-                    return
-            raise KeyError("--nocontainer and --name options conflict. The --name option requires a container.")
+        # If --name was not specified return valid
+        if not getattr(args, "name", None):
+            return
+        # If --generate was specified return valid
+        if getattr(args, "generate", False):
+            # Do not fail on serve if user specified --generate
+            return
+
+        raise KeyError("--nocontainer and --name options conflict. The --name option requires a container.")

     def vllm_serve(self, args, exec_model_path):
         exec_args = [
@@ -525,7 +529,7 @@ def llama_serve(self, args, exec_model_path, chat_template_path, mmproj_path):
         if args.debug:
             exec_args += ["-v"]

-        if hasattr(args, "webui") and args.webui == "off":
+        if getattr(args, "webui", "") == "off":
             exec_args.extend(["--no-webui"])

         if check_nvidia() or check_metal(args):
@@ -576,7 +580,7 @@ def handle_runtime(self, args, exec_args, exec_model_path):
             self.model_name,
         ]

-        if hasattr(args, 'runtime_args') and args.runtime_args:
+        if getattr(args, 'runtime_args', None):
             exec_args.extend(args.runtime_args)
         else:
             gpu_args = self.gpu_args(args=args)
@@ -653,7 +657,7 @@ def serve(self, args, quiet=False):
             return

         # Add rag chatbot
-        if hasattr(args, "rag") and args.rag:
+        if getattr(args, "rag", None):
             exec_args = [
                 "bash",
                 "-c",
@@ -734,7 +738,7 @@ def get_available_port_if_any() -> int:

 def compute_serving_port(args, quiet=False) -> str:
     # user probably specified a custom port, don't override the choice
-    if hasattr(args, "port") and args.port not in ["", str(DEFAULT_PORT)]:
+    if getattr(args, "port", "") not in ["", str(DEFAULT_PORT)]:
         target_port = args.port
     else:
         # otherwise compute a random serving port in the range
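
Across model.py the fallback passed to getattr is not always None: it is picked so the surrounding test still reads correctly when the attribute is absent, e.g. False for boolean flags, an empty string where the value is compared against string literals, and None for optional objects. A few illustrative one-liners mirroring the defaults used in this diff:

from argparse import Namespace

args = Namespace()  # no attributes set at all

assert getattr(args, "privileged", False) is False  # flag: absent behaves like off
assert getattr(args, "port", "") == ""              # string: absent behaves like unset
assert getattr(args, "rag", None) is None           # optional value: absent is None
assert getattr(args, "webui", "") != "off"          # absent never equals the literal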

ramalama/model_factory.py

Lines changed: 2 additions & 2 deletions
@@ -36,7 +36,7 @@ def __init__(

         self.pruned_model = self.prune_model_input()
         self.draft_model = None
-        if hasattr(args, 'model_draft') and args.model_draft:
+        if getattr(args, 'model_draft', None):
             dm_args = copy.deepcopy(args)
             dm_args.model_draft = None
             self.draft_model = ModelFactory(args.model_draft, dm_args, ignore_stderr=True).create()
@@ -133,7 +133,7 @@ def create_oci(self) -> OCI:
     def create_url(self) -> URL:
         model = URL(self.pruned_model, self.store_path, urlparse(self.model).scheme)
         model.draft_model = self.draft_model
-        if hasattr(self, 'split_model'):
+        if getattr(self, 'split_model', None):
             model.split_model = self.split_model
             model.mnt_path = self.mnt_path
         return model

ramalama/oci.py

Lines changed: 1 addition & 1 deletion
@@ -178,7 +178,7 @@ def _generate_containerfile(self, source_model, args):
         # Generate the containerfile content
         # Keep this in sync with docs/ramalama-oci.5.md !
         is_car = args.type == "car"
-        has_gguf = hasattr(args, 'gguf') and args.gguf is not None
+        has_gguf = getattr(args, 'gguf', None) is not None
         content = ""

         model_name = source_model.model_name

ramalama/quadlet.py

Lines changed: 4 additions & 6 deletions
@@ -6,9 +6,7 @@

 class Quadlet:
     def __init__(self, model, chat_template, args, exec_args):
-        self.ai_image = model
-        if hasattr(args, "MODEL"):
-            self.ai_image = args.MODEL
+        self.ai_image = getattr(args, "MODEL", model)
         self.ai_image = self.ai_image.removeprefix("oci://")
         if args.name:
             self.name = args.name
@@ -83,7 +81,7 @@ def _gen_image(self, name, image):
         return image_file

     def _gen_name(self, quadlet_file: UnitFile):
-        if hasattr(self.args, "name") and self.args.name:
+        if getattr(self.args, "name", None):
             quadlet_file.add("Container", "ContainerName", f"{self.args.name}")

     def _gen_model_volume(self, quadlet_file: UnitFile):
@@ -111,13 +109,13 @@ def _gen_model_volume(self, quadlet_file: UnitFile):
         return files

     def _gen_port(self, quadlet_file: UnitFile):
-        if hasattr(self.args, "port") and self.args.port != "":
+        if getattr(self.args, "port", "") != "":
             quadlet_file.add("Container", "PublishPort", f"{self.args.port}:{self.args.port}")

     def _gen_rag_volume(self, quadlet_file: UnitFile):
         files: list[UnitFile] = []

-        if not hasattr(self.args, "rag") or not self.rag:
+        if not getattr(self.args, "rag", None):
             return files

         rag_volume_file_name = f"{self.rag_name}.volume"

ramalama/stack.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ class Stack:

     def __init__(self, args):
         self.args = args
-        self.name = args.name if hasattr(args, "name") and args.name else genname()
+        self.name = getattr(args, "name", None) or genname()
         if os.path.basename(args.engine) != "podman":
             raise ValueError("llama-stack requires use of the Podman container engine")
         self.host = "127.0.0.1"
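
Note the two fallback idioms in play: kube.py and quadlet.py rely on getattr's default alone (getattr(args, "MODEL", model)), which only kicks in when the attribute is missing, while stack.py chains getattr with `or` so that a present-but-falsy name also falls back to genname(). Side by side, with illustrative values:

from argparse import Namespace

ns = Namespace(MODEL="", name="")

# getattr's default applies only when the attribute is MISSING, so a
# present-but-empty MODEL is kept as-is -- matching the old bare
# hasattr check, which never looked at truthiness.
assert getattr(ns, "MODEL", "fallback") == ""

# Chaining with `or` also falls back on present-but-falsy values,
# matching the old `args.name if hasattr(...) and args.name else genname()`.
assert (getattr(ns, "name", None) or "generated") == "generated"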

test/system/050-pull.bats

Lines changed: 1 addition & 0 deletions
@@ -77,6 +77,7 @@ load setup_suite
     is "$output" ".*Felladrin/gguf-smollm-360M-instruct-add-basics/smollm-360M-instruct-add-basics.IQ2_XXS" "image was actually pulled locally"
     run_ramalama rm huggingface://Felladrin/gguf-smollm-360M-instruct-add-basics/smollm-360M-instruct-add-basics.IQ2_XXS.gguf

+    skip_if_no_hf-cli
     run_ramalama pull hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0
     run_ramalama list
     is "$output" ".*TinyLlama/TinyLlama-1.1B-Chat-v1.0" "image was actually pulled locally"
