Skip to content

Commit 4ca967c

Browse files
committed
Split out step resource request management
Before this change, all steps' resource requests (*not* limits) were zeroed out unless that value for that metric represented the maximum across all steps. If step 2 requested the maximum memory of all steps, but not the max CPU, its memory request would persist but its CPU request would be zeroed out. After this change, the max request for each resource type (CPU, memory, ephemeral storage) is calculated, then applied to the *last* step's container. The effect is the same, unless Pods' resource requests are updated to account for exited containers (as far as I know, they're not), and this is simpler to express and explain. And since all the code for this is in its own method and file, it's easier to test in isolation.
1 parent 8909b63 commit 4ca967c

File tree

4 files changed

+138
-130
lines changed

4 files changed

+138
-130
lines changed

pkg/pod/pod.go

Lines changed: 2 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,6 @@ import (
2828
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
2929
"github.com/tektoncd/pipeline/pkg/names"
3030
corev1 "k8s.io/api/core/v1"
31-
"k8s.io/apimachinery/pkg/api/resource"
3231
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
3332
"k8s.io/apimachinery/pkg/runtime/schema"
3433
"k8s.io/client-go/kubernetes"
@@ -77,8 +76,6 @@ var (
7776
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
7877
}}
7978

80-
zeroQty = resource.MustParse("0")
81-
8279
// Random byte reader used for pod name generation.
8380
// var for testing.
8481
randReader = rand.Reader
@@ -136,12 +133,8 @@ func MakePod(images pipeline.Images, taskRun *v1alpha1.TaskRun, taskSpec v1alpha
136133
initContainers = append(initContainers, entrypointInit)
137134
volumes = append(volumes, toolsVolume, downwardVolume)
138135

139-
// Zero out non-max resource requests.
140-
// TODO(#1605): Split this out so it's more easily testable.
141-
maxIndicesByResource := findMaxResourceRequest(taskSpec.Steps, corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage)
142-
for i := range stepContainers {
143-
zeroNonMaxResourceRequests(&stepContainers[i], i, maxIndicesByResource)
144-
}
136+
// Zero out non-max resource requests, move max resource requests to the last step.
137+
stepContainers = resolveResourceRequests(stepContainers)
145138

146139
// Add implicit env vars.
147140
// They're prepended to the list, so that if the user specified any
@@ -255,43 +248,3 @@ func makeLabels(s *v1alpha1.TaskRun) map[string]string {
255248
labels[taskRunLabelKey] = s.Name
256249
return labels
257250
}
258-
259-
// zeroNonMaxResourceRequests zeroes out the container's cpu, memory, or
260-
// ephemeral storage resource requests if the container does not have the
261-
// largest request out of all containers in the pod. This is done because Tekton
262-
// overwrites each container's entrypoint to make containers effectively execute
263-
// one at a time, so we want pods to only request the maximum resources needed
264-
// at any single point in time. If no container has an explicit resource
265-
// request, all requests are set to 0.
266-
func zeroNonMaxResourceRequests(c *corev1.Container, stepIndex int, maxIndicesByResource map[corev1.ResourceName]int) {
267-
if c.Resources.Requests == nil {
268-
c.Resources.Requests = corev1.ResourceList{}
269-
}
270-
for name, maxIdx := range maxIndicesByResource {
271-
if maxIdx != stepIndex {
272-
c.Resources.Requests[name] = zeroQty
273-
}
274-
}
275-
}
276-
277-
// findMaxResourceRequest returns the index of the container with the maximum
278-
// request for the given resource from among the given set of containers.
279-
func findMaxResourceRequest(steps []v1alpha1.Step, resourceNames ...corev1.ResourceName) map[corev1.ResourceName]int {
280-
maxIdxs := make(map[corev1.ResourceName]int, len(resourceNames))
281-
maxReqs := make(map[corev1.ResourceName]resource.Quantity, len(resourceNames))
282-
for _, name := range resourceNames {
283-
maxIdxs[name] = -1
284-
maxReqs[name] = zeroQty
285-
}
286-
for i, s := range steps {
287-
for _, name := range resourceNames {
288-
maxReq := maxReqs[name]
289-
req, exists := s.Container.Resources.Requests[name]
290-
if exists && req.Cmp(maxReq) > 0 {
291-
maxIdxs[name] = i
292-
maxReqs[name] = req
293-
}
294-
}
295-
}
296-
return maxIdxs
297-
}

pkg/pod/pod_test.go

Lines changed: 12 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -39,9 +39,6 @@ var (
3939
CredsImage: "override-with-creds:latest",
4040
ShellImage: "busybox",
4141
}
42-
resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
43-
return x.Cmp(y) == 0
44-
})
4542
)
4643

4744
func TestMakePod(t *testing.T) {
@@ -103,13 +100,7 @@ func TestMakePod(t *testing.T) {
103100
Env: implicitEnvVars,
104101
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
105102
WorkingDir: workspaceDir,
106-
Resources: corev1.ResourceRequirements{
107-
Requests: corev1.ResourceList{
108-
corev1.ResourceCPU: resource.MustParse("0"),
109-
corev1.ResourceMemory: resource.MustParse("0"),
110-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
111-
},
112-
},
103+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
113104
}},
114105
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
115106
},
@@ -160,13 +151,7 @@ func TestMakePod(t *testing.T) {
160151
Env: implicitEnvVars,
161152
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
162153
WorkingDir: workspaceDir,
163-
Resources: corev1.ResourceRequirements{
164-
Requests: corev1.ResourceList{
165-
corev1.ResourceCPU: resource.MustParse("0"),
166-
corev1.ResourceMemory: resource.MustParse("0"),
167-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
168-
},
169-
},
154+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
170155
}},
171156
Volumes: append(implicitVolumes, secretsVolume, toolsVolume, downwardVolume),
172157
},
@@ -209,13 +194,7 @@ func TestMakePod(t *testing.T) {
209194
Env: implicitEnvVars,
210195
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
211196
WorkingDir: workspaceDir,
212-
Resources: corev1.ResourceRequirements{
213-
Requests: corev1.ResourceList{
214-
corev1.ResourceCPU: resource.MustParse("0"),
215-
corev1.ResourceMemory: resource.MustParse("0"),
216-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
217-
},
218-
},
197+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
219198
}},
220199
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
221200
SecurityContext: &corev1.PodSecurityContext{
@@ -254,13 +233,7 @@ func TestMakePod(t *testing.T) {
254233
Env: implicitEnvVars,
255234
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
256235
WorkingDir: workspaceDir,
257-
Resources: corev1.ResourceRequirements{
258-
Requests: corev1.ResourceList{
259-
corev1.ResourceCPU: resource.MustParse("0"),
260-
corev1.ResourceMemory: resource.MustParse("0"),
261-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
262-
},
263-
},
236+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
264237
}},
265238
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
266239
},
@@ -293,13 +266,7 @@ func TestMakePod(t *testing.T) {
293266
Env: implicitEnvVars,
294267
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
295268
WorkingDir: workspaceDir,
296-
Resources: corev1.ResourceRequirements{
297-
Requests: corev1.ResourceList{
298-
corev1.ResourceCPU: resource.MustParse("0"),
299-
corev1.ResourceMemory: resource.MustParse("0"),
300-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
301-
},
302-
},
269+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
303270
}},
304271
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
305272
},
@@ -342,13 +309,7 @@ func TestMakePod(t *testing.T) {
342309
Env: implicitEnvVars,
343310
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
344311
WorkingDir: filepath.Join(workspaceDir, "test"),
345-
Resources: corev1.ResourceRequirements{
346-
Requests: corev1.ResourceList{
347-
corev1.ResourceCPU: resource.MustParse("0"),
348-
corev1.ResourceMemory: resource.MustParse("0"),
349-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
350-
},
351-
},
312+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
352313
}},
353314
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
354315
},
@@ -386,13 +347,7 @@ func TestMakePod(t *testing.T) {
386347
Env: implicitEnvVars,
387348
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
388349
WorkingDir: workspaceDir,
389-
Resources: corev1.ResourceRequirements{
390-
Requests: corev1.ResourceList{
391-
corev1.ResourceCPU: resource.MustParse("0"),
392-
corev1.ResourceMemory: resource.MustParse("0"),
393-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
394-
},
395-
},
350+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
396351
}, {
397352
Name: "sidecar-sc-name",
398353
Image: "sidecar-image",
@@ -445,13 +400,7 @@ func TestMakePod(t *testing.T) {
445400
Env: implicitEnvVars,
446401
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
447402
WorkingDir: workspaceDir,
448-
Resources: corev1.ResourceRequirements{
449-
Requests: corev1.ResourceList{
450-
corev1.ResourceCPU: resource.MustParse("8"),
451-
corev1.ResourceMemory: resource.MustParse("0"),
452-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
453-
},
454-
},
403+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
455404
}, {
456405
Name: "step-unnamed-1",
457406
Image: "image",
@@ -470,7 +419,7 @@ func TestMakePod(t *testing.T) {
470419
WorkingDir: workspaceDir,
471420
Resources: corev1.ResourceRequirements{
472421
Requests: corev1.ResourceList{
473-
corev1.ResourceCPU: resource.MustParse("0"),
422+
corev1.ResourceCPU: resource.MustParse("8"),
474423
corev1.ResourceMemory: resource.MustParse("100Gi"),
475424
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
476425
},
@@ -550,13 +499,7 @@ script-heredoc-randomly-generated-6nl7g
550499
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
551500
VolumeMounts: append([]corev1.VolumeMount{scriptsVolumeMount, toolsMount, downwardMount}, implicitVolumeMounts...),
552501
WorkingDir: workspaceDir,
553-
Resources: corev1.ResourceRequirements{
554-
Requests: corev1.ResourceList{
555-
corev1.ResourceCPU: resource.MustParse("0"),
556-
corev1.ResourceMemory: resource.MustParse("0"),
557-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
558-
},
559-
},
502+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
560503
}, {
561504
Name: "step-two",
562505
Image: "image",
@@ -573,13 +516,7 @@ script-heredoc-randomly-generated-6nl7g
573516
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
574517
VolumeMounts: append([]corev1.VolumeMount{{Name: "i-have-a-volume-mount"}, scriptsVolumeMount, toolsMount}, implicitVolumeMounts...),
575518
WorkingDir: workspaceDir,
576-
Resources: corev1.ResourceRequirements{
577-
Requests: corev1.ResourceList{
578-
corev1.ResourceCPU: resource.MustParse("0"),
579-
corev1.ResourceMemory: resource.MustParse("0"),
580-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
581-
},
582-
},
519+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
583520
}, {
584521
Name: "step-regular-step",
585522
Image: "image",
@@ -599,13 +536,7 @@ script-heredoc-randomly-generated-6nl7g
599536
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
600537
VolumeMounts: append([]corev1.VolumeMount{toolsMount}, implicitVolumeMounts...),
601538
WorkingDir: workspaceDir,
602-
Resources: corev1.ResourceRequirements{
603-
Requests: corev1.ResourceList{
604-
corev1.ResourceCPU: resource.MustParse("0"),
605-
corev1.ResourceMemory: resource.MustParse("0"),
606-
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
607-
},
608-
},
539+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
609540
}},
610541
Volumes: append(implicitVolumes, scriptsVolume, toolsVolume, downwardVolume),
611542
},

pkg/pod/resource_request.go

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
package pod
2+
3+
import (
4+
corev1 "k8s.io/api/core/v1"
5+
"k8s.io/apimachinery/pkg/api/resource"
6+
)
7+
8+
var zeroQty = resource.MustParse("0")
9+
10+
func allZeroQty() corev1.ResourceList {
11+
return corev1.ResourceList{
12+
corev1.ResourceCPU: zeroQty,
13+
corev1.ResourceMemory: zeroQty,
14+
corev1.ResourceEphemeralStorage: zeroQty,
15+
}
16+
}
17+
18+
func resolveResourceRequests(containers []corev1.Container) []corev1.Container {
19+
max := allZeroQty()
20+
for _, c := range containers {
21+
for k, v := range c.Resources.Requests {
22+
if v.Cmp(max[k]) > 0 {
23+
max[k] = v
24+
}
25+
}
26+
}
27+
28+
// Set resource requests for all steps but the theh last container to
29+
// zero.
30+
for i := range containers[:len(containers)-1] {
31+
containers[i].Resources.Requests = allZeroQty()
32+
}
33+
// Set the last container's request to the max of all resources.
34+
containers[len(containers)-1].Resources.Requests = max
35+
return containers
36+
}

pkg/pod/resource_request_test.go

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
package pod
2+
3+
import (
4+
"testing"
5+
6+
"github.com/google/go-cmp/cmp"
7+
corev1 "k8s.io/api/core/v1"
8+
"k8s.io/apimachinery/pkg/api/resource"
9+
)
10+
11+
var resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
12+
return x.Cmp(y) == 0
13+
})
14+
15+
func TestResolveResourceRequests(t *testing.T) {
16+
for _, c := range []struct {
17+
desc string
18+
in, want []corev1.Container
19+
}{{
20+
desc: "three steps, no requests",
21+
in: []corev1.Container{{}, {}, {}},
22+
want: []corev1.Container{{
23+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
24+
}, {
25+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
26+
}, {
27+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
28+
}},
29+
}, {
30+
desc: "requests are moved, limits aren't changed",
31+
in: []corev1.Container{{
32+
Resources: corev1.ResourceRequirements{
33+
Requests: corev1.ResourceList{
34+
corev1.ResourceCPU: resource.MustParse("10"),
35+
},
36+
},
37+
}, {
38+
Resources: corev1.ResourceRequirements{
39+
Requests: corev1.ResourceList{
40+
corev1.ResourceMemory: resource.MustParse("10Gi"),
41+
},
42+
Limits: corev1.ResourceList{
43+
corev1.ResourceMemory: resource.MustParse("11Gi"),
44+
},
45+
},
46+
}, {
47+
Resources: corev1.ResourceRequirements{
48+
Requests: corev1.ResourceList{
49+
corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
50+
},
51+
Limits: corev1.ResourceList{
52+
corev1.ResourceMemory: resource.MustParse("100Gi"),
53+
},
54+
},
55+
}},
56+
want: []corev1.Container{{
57+
// All zeroed out.
58+
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
59+
}, {
60+
// Requests zeroed out, limits remain.
61+
Resources: corev1.ResourceRequirements{
62+
Requests: allZeroQty(),
63+
Limits: corev1.ResourceList{
64+
corev1.ResourceMemory: resource.MustParse("11Gi"),
65+
},
66+
},
67+
}, {
68+
// Requests to the max, limits remain.
69+
Resources: corev1.ResourceRequirements{
70+
Requests: corev1.ResourceList{
71+
corev1.ResourceCPU: resource.MustParse("10"),
72+
corev1.ResourceMemory: resource.MustParse("10Gi"),
73+
corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
74+
},
75+
Limits: corev1.ResourceList{
76+
corev1.ResourceMemory: resource.MustParse("100Gi"),
77+
},
78+
},
79+
}},
80+
}} {
81+
t.Run(c.desc, func(t *testing.T) {
82+
got := resolveResourceRequests(c.in)
83+
if d := cmp.Diff(c.want, got, resourceQuantityCmp); d != "" {
84+
t.Errorf("Diff(-want, +got): %s", d)
85+
}
86+
})
87+
}
88+
}

0 commit comments

Comments
 (0)