Skip to content

Commit b877f1a

Browse files
committed
Switch out hasattr for getattr wherever possible
This greatly simplifies the code logic. Signed-off-by: Daniel J Walsh <[email protected]>
1 parent 681c488 commit b877f1a

File tree

9 files changed

+53
-54
lines changed

9 files changed

+53
-54
lines changed

.github/workflows/ci.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -207,6 +207,10 @@ jobs:
207207
sudo mv /tmp/daemon.json /etc/docker/daemon.json
208208
cat /etc/docker/daemon.json
209209
sudo systemctl restart docker.service
210+
sudo mkdir -m a=rwx -p /mnt/tmp /mnt/runner
211+
sudo mkdir -m o=rwx -p /home/runner/.local
212+
sudo chown runner:runner /mnt/runner /home/runner/.local
213+
sudo mount --bind /mnt/runner /home/runner/.local
210214
df -h
211215
212216
- name: Build a container for CPU inferencing

ramalama/cli.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ def parse_arguments(parser):
270270

271271
def post_parse_setup(args):
272272
"""Perform additional setup after parsing arguments."""
273-
if hasattr(args, "MODEL") and args.subcommand != "rm":
273+
if getattr(args, "MODEL", None) and args.subcommand != "rm":
274274
resolved_model = shortnames.resolve(args.MODEL)
275275
if resolved_model:
276276
args.UNRESOLVED_MODEL = args.MODEL

ramalama/engine.py

Lines changed: 19 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -57,11 +57,11 @@ def add_pull_newer(self):
5757
self.exec_args += ["--pull", self.args.pull]
5858

5959
def add_network(self):
60-
if hasattr(self.args, "network") and self.args.network:
60+
if getattr(self.args, "network", None):
6161
self.exec_args += ["--network", self.args.network]
6262

6363
def add_oci_runtime(self):
64-
if hasattr(self.args, "oci_runtime") and self.args.oci_runtime:
64+
if getattr(self.args, "oci_runtime", None):
6565
self.exec_args += ["--runtime", self.args.oci_runtime]
6666
return
6767
if check_nvidia() == "cuda":
@@ -71,13 +71,13 @@ def add_oci_runtime(self):
7171
self.exec_args += ["--runtime", "/usr/bin/nvidia-container-runtime"]
7272

7373
def add_privileged_options(self):
74-
if hasattr(self.args, "privileged") and self.args.privileged:
74+
if getattr(self.args, "privileged", False):
7575
self.exec_args += ["--privileged"]
7676
else:
7777
self.exec_args += [
7878
"--security-opt=label=disable",
7979
]
80-
if not hasattr(self.args, "nocapdrop"):
80+
if not getattr(self.args, "nocapdrop", False):
8181
self.exec_args += [
8282
"--cap-drop=all",
8383
"--security-opt=no-new-privileges",
@@ -89,12 +89,9 @@ def cap_add(self, cap):
8989
def use_tty(self):
9090
if not sys.stdin.isatty():
9191
return False
92-
if not (hasattr(self.args, "ARGS") and self.args.ARGS):
92+
if getattr(self.args, "ARGS", None):
9393
return False
94-
if not (hasattr(self.args, "subcommand") and self.args.subcommand == "run"):
95-
return False
96-
97-
return True
94+
return getattr(self.args, "subcommand", "") != "run"
9895

9996
def add_subcommand_env(self):
10097
if EMOJI and self.use_tty():
@@ -104,20 +101,19 @@ def add_subcommand_env(self):
104101
self.exec_args += ["--env", "LLAMA_PROMPT_PREFIX=🐋 > "]
105102

106103
def add_env_option(self):
107-
if hasattr(self.args, "env"):
108-
for env in self.args.env:
109-
self.exec_args += ["--env", env]
104+
for env in getattr(self.args, "env", []):
105+
self.exec_args += ["--env", env]
110106

111107
def add_tty_option(self):
112108
if self.use_tty():
113109
self.exec_args += ["-t"]
114110

115111
def add_detach_option(self):
116-
if hasattr(self.args, "detach") and self.args.detach is True:
112+
if getattr(self.args, "detach", False):
117113
self.exec_args += ["-d"]
118114

119115
def add_port_option(self):
120-
if not hasattr(self.args, "port") or not self.args.port or self.args.port == "":
116+
if getattr(self.args, "port", "") == "":
121117
return
122118

123119
if self.args.port.count(":") > 0:
@@ -126,7 +122,7 @@ def add_port_option(self):
126122
self.exec_args += ["-p", f"{self.args.port}:{self.args.port}"]
127123

128124
def add_device_options(self):
129-
if hasattr(self.args, "device") and self.args.device:
125+
if getattr(self.args, "device", None):
130126
for device_arg in self.args.device:
131127
self.exec_args += ["--device", device_arg]
132128

@@ -151,7 +147,7 @@ def add_device_options(self):
151147
self.exec_args += ["-e", f"{k}={v}"]
152148

153149
def add_rag(self):
154-
if not hasattr(self.args, "rag") or not self.args.rag:
150+
if not getattr(self.args, "rag", None):
155151
return
156152

157153
if os.path.exists(self.args.rag):
@@ -162,7 +158,7 @@ def add_rag(self):
162158
self.exec_args.append(f"--mount=type=image,source={self.args.rag},destination=/rag,rw=true")
163159

164160
def handle_podman_specifics(self):
165-
if hasattr(self.args, "podman_keep_groups") and self.args.podman_keep_groups:
161+
if getattr(self.args, "podman_keep_groups", None):
166162
self.exec_args += ["--group-add", "keep-groups"]
167163

168164
def add(self, newargs):
@@ -195,10 +191,10 @@ def images(args):
195191
raise ValueError("no container manager (Podman, Docker) found")
196192

197193
conman_args = [conman, "images"]
198-
if hasattr(args, "noheading") and args.noheading:
194+
if getattr(args, "noheading", False):
199195
conman_args += ["--noheading"]
200196

201-
if hasattr(args, "notrunc") and args.notrunc:
197+
if getattr(args, "notrunc", False):
202198
conman_args += ["--no-trunc"]
203199

204200
if args.format:
@@ -220,10 +216,10 @@ def containers(args):
220216
raise ValueError("no container manager (Podman, Docker) found")
221217

222218
conman_args = [conman, "ps", "-a", "--filter", "label=ai.ramalama"]
223-
if hasattr(args, "noheading") and args.noheading:
219+
if getattr(args, "noheading", False):
224220
conman_args += ["--noheading"]
225221

226-
if hasattr(args, "notrunc") and args.notrunc:
222+
if getattr(args, "notrunc", False):
227223
conman_args += ["--no-trunc"]
228224

229225
if args.format:
@@ -332,6 +328,5 @@ def add_labels(args, add_label):
332328
"subcommand": "ai.ramalama.command",
333329
}
334330
for arg, label_prefix in label_map.items():
335-
if hasattr(args, arg):
336-
if value := getattr(args, arg):
337-
add_label(f"{label_prefix}={value}")
331+
if value := getattr(args, arg, None):
332+
add_label(f"{label_prefix}={value}")

ramalama/kube.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,9 @@
77

88
class Kube:
99
def __init__(self, model, chat_template, args, exec_args):
10-
self.ai_image = model
11-
if hasattr(args, "MODEL"):
12-
self.ai_image = args.MODEL
10+
self.ai_image = getattr(args, "MODEL", model)
1311
self.ai_image = self.ai_image.removeprefix("oci://")
14-
if hasattr(args, "name") and args.name:
12+
if getattr(args, "name", None):
1513
self.name = args.name
1614
else:
1715
self.name = genname()

ramalama/model.py

Lines changed: 19 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -198,18 +198,18 @@ def remove(self, args):
198198
self.garbage_collection(args)
199199

200200
def get_container_name(self, args):
201-
if hasattr(args, "name") and args.name:
201+
if getattr(args, "name", None):
202202
return args.name
203203

204204
return genname()
205205

206206
def base(self, args, name):
207207
# force accel_image to use -rag version. Drop TAG if it exists
208208
# so that accel_image will add -rag to the image specification.
209-
if args.image == self.default_image and (hasattr(args, "rag") and args.rag):
209+
if args.image == self.default_image and getattr(args, "rag", None):
210210
args.image = rag_image(args.image)
211211
self.engine = Engine(args)
212-
if args.subcommand == "run" and not (hasattr(args, "ARGS") and args.ARGS) and sys.stdin.isatty():
212+
if args.subcommand == "run" and not getattr(args, "ARGS", None) and sys.stdin.isatty():
213213
self.engine.add(["-i"])
214214

215215
self.engine.add(
@@ -237,7 +237,7 @@ def add_oci_runtime(self, conman_args, args):
237237
return conman_args
238238

239239
def add_rag(self, exec_args, args):
240-
if not hasattr(args, "rag") or not args.rag:
240+
if not getattr(args, "rag", None):
241241
return exec_args
242242

243243
if os.path.exists(args.rag):
@@ -291,7 +291,7 @@ def exec_model_in_container(self, model_path, cmd_args, args):
291291
def setup_mounts(self, model_path, args):
292292
if args.runtime == "vllm":
293293
model_base = ""
294-
if self.store and hasattr(self, 'model_tag'):
294+
if self.store and getattr(self, 'model_tag', None):
295295
ref_file = self.store.get_ref_file(self.model_tag)
296296
if ref_file and hasattr(ref_file, 'hash'):
297297
model_base = self.store.model_base_directory
@@ -464,18 +464,22 @@ def build_exec_args_bench(self, args, model_path):
464464
return exec_args
465465

466466
def validate_args(self, args):
467+
# If --container was specified return valid
467468
if args.container:
468469
return
469470
if args.privileged:
470471
raise KeyError(
471472
"--nocontainer and --privileged options conflict. The --privileged option requires a container."
472473
)
473-
if hasattr(args, "name") and args.name:
474-
if hasattr(args, "generate"):
475-
# Do not fail on serve if user specified --generate
476-
if args.generate:
477-
return
478-
raise KeyError("--nocontainer and --name options conflict. The --name option requires a container.")
474+
# If --name was not specified return valid
475+
if not getattr(args, "name", None):
476+
return
477+
# If --generate was specified return valid
478+
if getattr(args, "generate", False):
479+
# Do not fail on serve if user specified --generate
480+
return
481+
482+
raise KeyError("--nocontainer and --name options conflict. The --name option requires a container.")
479483

480484
def vllm_serve(self, args, exec_model_path):
481485
exec_args = [
@@ -532,7 +536,7 @@ def llama_serve(self, args, exec_model_path, chat_template_path, mmproj_path):
532536
if args.debug:
533537
exec_args += ["-v"]
534538

535-
if hasattr(args, "webui") and args.webui == "off":
539+
if getattr(args, "webui", "") == "off":
536540
exec_args.extend(["--no-webui"])
537541

538542
if check_nvidia() or check_metal(args):
@@ -583,7 +587,7 @@ def handle_runtime(self, args, exec_args, exec_model_path):
583587
self.model_name,
584588
]
585589

586-
if hasattr(args, 'runtime_args') and args.runtime_args:
590+
if getattr(args, 'runtime_args', None):
587591
exec_args.extend(args.runtime_args)
588592
else:
589593
gpu_args = self.gpu_args(args=args)
@@ -660,7 +664,7 @@ def serve(self, args, quiet=False):
660664
return
661665

662666
# Add rag chatbot
663-
if hasattr(args, "rag") and args.rag:
667+
if getattr(args, "rag", None):
664668
exec_args = [
665669
"bash",
666670
"-c",
@@ -741,7 +745,7 @@ def get_available_port_if_any() -> int:
741745

742746
def compute_serving_port(args, quiet=False) -> str:
743747
# user probably specified a custom port, don't override the choice
744-
if hasattr(args, "port") and args.port not in ["", str(DEFAULT_PORT)]:
748+
if getattr(args, "port", "") not in ["", str(DEFAULT_PORT)]:
745749
target_port = args.port
746750
else:
747751
# otherwise compute a random serving port in the range

ramalama/model_factory.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def __init__(
3838

3939
self.pruned_model = self.prune_model_input()
4040
self.draft_model = None
41-
if hasattr(args, 'model_draft') and args.model_draft:
41+
if getattr(args, 'model_draft', None):
4242
dm_args = copy.deepcopy(args)
4343
dm_args.model_draft = None
4444
self.draft_model = ModelFactory(args.model_draft, dm_args, ignore_stderr=True).create()
@@ -145,7 +145,7 @@ def create_url(self) -> URL:
145145
model = URL(self.pruned_model, urlparse(self.model).scheme)
146146
self.set_optional_model_store(model)
147147
model.draft_model = self.draft_model
148-
if hasattr(self, 'split_model'):
148+
if getattr(self, 'split_model', None):
149149
model.split_model = self.split_model
150150
model.mnt_path = self.mnt_path
151151
return model

ramalama/oci.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ def _generate_containerfile(self, source_model, args):
178178
# Generate the containerfile content
179179
# Keep this in sync with docs/ramalama-oci.5.md !
180180
is_car = args.type == "car"
181-
has_gguf = hasattr(args, 'gguf') and args.gguf is not None
181+
has_gguf = getattr(args, 'gguf', None) is not None
182182
content = ""
183183

184184
model_name = source_model.model_name

ramalama/quadlet.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,7 @@
66

77
class Quadlet:
88
def __init__(self, model, chat_template, args, exec_args):
9-
self.ai_image = model
10-
if hasattr(args, "MODEL"):
11-
self.ai_image = args.MODEL
9+
self.ai_image = getattr(args, "MODEL", model)
1210
self.ai_image = self.ai_image.removeprefix("oci://")
1311
if args.name:
1412
self.name = args.name
@@ -83,7 +81,7 @@ def _gen_image(self, name, image):
8381
return image_file
8482

8583
def _gen_name(self, quadlet_file: UnitFile):
86-
if hasattr(self.args, "name") and self.args.name:
84+
if getattr(self.args, "name", None):
8785
quadlet_file.add("Container", "ContainerName", f"{self.args.name}")
8886

8987
def _gen_model_volume(self, quadlet_file: UnitFile):
@@ -111,13 +109,13 @@ def _gen_model_volume(self, quadlet_file: UnitFile):
111109
return files
112110

113111
def _gen_port(self, quadlet_file: UnitFile):
114-
if hasattr(self.args, "port") and self.args.port != "":
112+
if getattr(self.args, "port", "") != "":
115113
quadlet_file.add("Container", "PublishPort", f"{self.args.port}:{self.args.port}")
116114

117115
def _gen_rag_volume(self, quadlet_file: UnitFile):
118116
files: list[UnitFile] = []
119117

120-
if not hasattr(self.args, "rag") or not self.rag:
118+
if not getattr(self.args, "rag", None):
121119
return files
122120

123121
rag_volume_file_name = f"{self.rag_name}.volume"

ramalama/stack.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ class Stack:
2020

2121
def __init__(self, args):
2222
self.args = args
23-
self.name = args.name if hasattr(args, "name") and args.name else genname()
23+
self.name = getattr(args, "name", None) or genname()
2424
if os.path.basename(args.engine) != "podman":
2525
raise ValueError("llama-stack requires use of the Podman container engine")
2626
self.host = "127.0.0.1"

0 commit comments

Comments
 (0)