@@ -223,7 +223,7 @@ func TestFindTopologyAssignment(t *testing.T) {
223223 wantAssignment * kueue.TopologyAssignment
224224 wantReason string
225225 // TODO: remove after dropping the TASLeastAllocated feature gate
226- leastAllocated bool
226+ enableTASLeastAllocated bool
227227 }{
228228 // TODO: remove suffixes LeastAllocated/MostAllocated after dropping the TASLeastAllocated feature gate
229229 "minimize the number of used racks before optimizing the number of nodes; LeastAllocated" : {
@@ -329,7 +329,7 @@ func TestFindTopologyAssignment(t *testing.T) {
329329 },
330330 },
331331 },
332- leastAllocated : true ,
332+ enableTASLeastAllocated : true ,
333333 },
334334 "block required; 4 pods fit into one host each; LeastAllocated" : {
335335 nodes : binaryTreesNodes ,
@@ -370,7 +370,7 @@ func TestFindTopologyAssignment(t *testing.T) {
370370 },
371371 },
372372 },
373- leastAllocated : true ,
373+ enableTASLeastAllocated : true ,
374374 },
375375 "block required; 4 pods fit into one host each; MostAllocated" : {
376376 nodes : binaryTreesNodes ,
@@ -411,7 +411,7 @@ func TestFindTopologyAssignment(t *testing.T) {
411411 },
412412 },
413413 },
414- leastAllocated : false ,
414+ enableTASLeastAllocated : false ,
415415 },
416416 "host required; single Pod fits in the host; LeastAllocated" : {
417417 // TODO: remove after dropping the TASLeastAllocated feature gate
@@ -435,7 +435,7 @@ func TestFindTopologyAssignment(t *testing.T) {
435435 },
436436 },
437437 },
438- leastAllocated : true ,
438+ enableTASLeastAllocated : true ,
439439 },
440440 "host required; single Pod fits in the host; MostAllocated" : {
441441 nodes : defaultNodes ,
@@ -458,7 +458,7 @@ func TestFindTopologyAssignment(t *testing.T) {
458458 },
459459 },
460460 },
461- leastAllocated : false ,
461+ enableTASLeastAllocated : false ,
462462 },
463463 "rack required; single Pod fits in a rack; LeastAllocated" : {
464464 // TODO: remove after dropping the TASLeastAllocated feature gate
@@ -483,7 +483,7 @@ func TestFindTopologyAssignment(t *testing.T) {
483483 },
484484 },
485485 },
486- leastAllocated : true ,
486+ enableTASLeastAllocated : true ,
487487 },
488488 "rack required; multiple Pods fits in a rack; LeastAllocated" : {
489489 nodes : defaultNodes ,
@@ -507,7 +507,7 @@ func TestFindTopologyAssignment(t *testing.T) {
507507 },
508508 },
509509 },
510- leastAllocated : true ,
510+ enableTASLeastAllocated : true ,
511511 },
512512 "rack required; multiple Pods fit in a rack; MostAllocated" : {
513513 nodes : defaultNodes ,
@@ -531,7 +531,7 @@ func TestFindTopologyAssignment(t *testing.T) {
531531 },
532532 },
533533 },
534- leastAllocated : false ,
534+ enableTASLeastAllocated : false ,
535535 },
536536 "block preferred; Pods fit in 2 blocks; MostAllocated" : {
537537 nodes : []corev1.Node {
@@ -582,7 +582,7 @@ func TestFindTopologyAssignment(t *testing.T) {
582582 },
583583 },
584584 },
585- leastAllocated : false ,
585+ enableTASLeastAllocated : false ,
586586 },
587587 "rack required; multiple Pods fit in some racks; MostAllocated" : {
588588 nodes : defaultNodes ,
@@ -606,7 +606,7 @@ func TestFindTopologyAssignment(t *testing.T) {
606606 },
607607 },
608608 },
609- leastAllocated : false ,
609+ enableTASLeastAllocated : false ,
610610 },
611611 "rack required; too many pods to fit in any rack; LeastAllocated" : {
612612 nodes : defaultNodes ,
@@ -617,9 +617,9 @@ func TestFindTopologyAssignment(t *testing.T) {
617617 requests : resources.Requests {
618618 corev1 .ResourceCPU : 1000 ,
619619 },
620- count : 4 ,
621- wantReason : `topology "default" allows to fit only 3 out of 4 pod(s)` ,
622- leastAllocated : true ,
620+ count : 4 ,
621+ wantReason : `topology "default" allows to fit only 3 out of 4 pod(s)` ,
622+ enableTASLeastAllocated : true ,
623623 },
624624 "block required; single Pod fits in a block; LeastAllocated" : {
625625 // TODO: remove after dropping the TASLeastAllocated feature gate
@@ -647,7 +647,7 @@ func TestFindTopologyAssignment(t *testing.T) {
647647 },
648648 },
649649 },
650- leastAllocated : true ,
650+ enableTASLeastAllocated : true ,
651651 },
652652 "block required; single Pod fits in a block and a single rack; MostAllocated" : {
653653 nodes : defaultNodes ,
@@ -674,7 +674,7 @@ func TestFindTopologyAssignment(t *testing.T) {
674674 },
675675 },
676676 },
677- leastAllocated : false ,
677+ enableTASLeastAllocated : false ,
678678 },
679679 "block required; single Pod fits in a block spread across two racks; MostAllocated" : {
680680 nodes : defaultNodes ,
@@ -708,7 +708,7 @@ func TestFindTopologyAssignment(t *testing.T) {
708708 },
709709 },
710710 },
711- leastAllocated : false ,
711+ enableTASLeastAllocated : false ,
712712 },
713713 "block required; Pods fit in a block spread across two racks; LeastAllocated" : {
714714 nodes : defaultNodes ,
@@ -739,7 +739,7 @@ func TestFindTopologyAssignment(t *testing.T) {
739739 },
740740 },
741741 },
742- leastAllocated : true ,
742+ enableTASLeastAllocated : true ,
743743 },
744744 "block required; single Pod which cannot be split; LeastAllocated" : {
745745 nodes : defaultNodes ,
@@ -750,9 +750,9 @@ func TestFindTopologyAssignment(t *testing.T) {
750750 requests : resources.Requests {
751751 corev1 .ResourceCPU : 4000 ,
752752 },
753- count : 1 ,
754- wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
755- leastAllocated : true ,
753+ count : 1 ,
754+ wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
755+ enableTASLeastAllocated : true ,
756756 },
757757 "block required; too many Pods to fit requested; LeastAllocated" : {
758758 nodes : defaultNodes ,
@@ -763,9 +763,9 @@ func TestFindTopologyAssignment(t *testing.T) {
763763 requests : resources.Requests {
764764 corev1 .ResourceCPU : 1000 ,
765765 },
766- count : 5 ,
767- wantReason : `topology "default" allows to fit only 4 out of 5 pod(s)` ,
768- leastAllocated : true ,
766+ count : 5 ,
767+ wantReason : `topology "default" allows to fit only 4 out of 5 pod(s)` ,
768+ enableTASLeastAllocated : true ,
769769 },
770770 "rack required; single Pod requiring memory; LeastAllocated" : {
771771 // TODO: remove after dropping the TASLeastAllocated feature gate
@@ -790,7 +790,7 @@ func TestFindTopologyAssignment(t *testing.T) {
790790 },
791791 },
792792 },
793- leastAllocated : true ,
793+ enableTASLeastAllocated : true ,
794794 },
795795 "rack preferred; but only block can accommodate the workload; LeastAllocated" : {
796796 nodes : defaultNodes ,
@@ -821,7 +821,7 @@ func TestFindTopologyAssignment(t *testing.T) {
821821 },
822822 },
823823 },
824- leastAllocated : true ,
824+ enableTASLeastAllocated : true ,
825825 },
826826 "rack preferred; but only multiple blocks can accommodate the workload; LeastAllocated" : {
827827 nodes : defaultNodes ,
@@ -859,7 +859,7 @@ func TestFindTopologyAssignment(t *testing.T) {
859859 },
860860 },
861861 },
862- leastAllocated : true ,
862+ enableTASLeastAllocated : true ,
863863 },
864864 "block preferred; but only multiple blocks can accommodate the workload; LeastAllocated" : {
865865 nodes : defaultNodes ,
@@ -897,7 +897,7 @@ func TestFindTopologyAssignment(t *testing.T) {
897897 },
898898 },
899899 },
900- leastAllocated : true ,
900+ enableTASLeastAllocated : true ,
901901 },
902902 "block preferred; but the workload cannot be accommodate in entire topology; LeastAllocated" : {
903903 nodes : defaultNodes ,
@@ -908,9 +908,9 @@ func TestFindTopologyAssignment(t *testing.T) {
908908 requests : resources.Requests {
909909 corev1 .ResourceCPU : 1000 ,
910910 },
911- count : 10 ,
912- wantReason : `topology "default" allows to fit only 7 out of 10 pod(s)` ,
913- leastAllocated : true ,
911+ count : 10 ,
912+ wantReason : `topology "default" allows to fit only 7 out of 10 pod(s)` ,
913+ enableTASLeastAllocated : true ,
914914 },
915915 "only nodes with matching labels are considered; no matching node; LeastAllocated" : {
916916 nodes : []corev1.Node {
@@ -933,9 +933,9 @@ func TestFindTopologyAssignment(t *testing.T) {
933933 requests : resources.Requests {
934934 corev1 .ResourceCPU : 1000 ,
935935 },
936- count : 1 ,
937- wantReason : "no topology domains at level: kubernetes.io/hostname" ,
938- leastAllocated : true ,
936+ count : 1 ,
937+ wantReason : "no topology domains at level: kubernetes.io/hostname" ,
938+ enableTASLeastAllocated : true ,
939939 },
940940 "only nodes with matching labels are considered; matching node is found; LeastAllocated" : {
941941 nodes : []corev1.Node {
@@ -971,7 +971,7 @@ func TestFindTopologyAssignment(t *testing.T) {
971971 },
972972 },
973973 },
974- leastAllocated : true ,
974+ enableTASLeastAllocated : true ,
975975 },
976976 "only nodes with matching levels are considered; no host label on node; LeastAllocated" : {
977977 nodes : []corev1.Node {
@@ -993,9 +993,9 @@ func TestFindTopologyAssignment(t *testing.T) {
993993 requests : resources.Requests {
994994 corev1 .ResourceCPU : 1000 ,
995995 },
996- count : 1 ,
997- wantReason : "no topology domains at level: cloud.com/topology-rack" ,
998- leastAllocated : true ,
996+ count : 1 ,
997+ wantReason : "no topology domains at level: cloud.com/topology-rack" ,
998+ enableTASLeastAllocated : true ,
999999 },
10001000 "don't consider unscheduled Pods when computing capacity; LeastAllocated" : {
10011001 // the Pod is not scheduled (no NodeName set, so is not blocking capacity)
@@ -1033,7 +1033,7 @@ func TestFindTopologyAssignment(t *testing.T) {
10331033 },
10341034 },
10351035 },
1036- leastAllocated : true ,
1036+ enableTASLeastAllocated : true ,
10371037 },
10381038 "don't consider terminal pods when computing the capacity; LeastAllocated" : {
10391039 nodes : []corev1.Node {
@@ -1075,7 +1075,7 @@ func TestFindTopologyAssignment(t *testing.T) {
10751075 },
10761076 },
10771077 },
1078- leastAllocated : true ,
1078+ enableTASLeastAllocated : true ,
10791079 },
10801080 "include usage from pending scheduled non-TAS pods, blocked assignment; LeastAllocated" : {
10811081 // there is not enough free capacity on the only node x1
@@ -1102,9 +1102,9 @@ func TestFindTopologyAssignment(t *testing.T) {
11021102 requests : resources.Requests {
11031103 corev1 .ResourceCPU : 600 ,
11041104 },
1105- count : 1 ,
1106- wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
1107- leastAllocated : true ,
1105+ count : 1 ,
1106+ wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
1107+ enableTASLeastAllocated : true ,
11081108 },
11091109 "include usage from running non-TAS pods, blocked assignment; LeastAllocated" : {
11101110 // there is not enough free capacity on the only node x1
@@ -1131,9 +1131,9 @@ func TestFindTopologyAssignment(t *testing.T) {
11311131 requests : resources.Requests {
11321132 corev1 .ResourceCPU : 600 ,
11331133 },
1134- count : 1 ,
1135- wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
1136- leastAllocated : true ,
1134+ count : 1 ,
1135+ wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
1136+ enableTASLeastAllocated : true ,
11371137 },
11381138 "include usage from running non-TAS pods, found free capacity on another node; LeastAllocated" : {
11391139 // there is not enough free capacity on the node x1 as the
@@ -1180,7 +1180,7 @@ func TestFindTopologyAssignment(t *testing.T) {
11801180 },
11811181 },
11821182 },
1183- leastAllocated : true ,
1183+ enableTASLeastAllocated : true ,
11841184 },
11851185 "no assignment as node is not ready; LeastAllocated" : {
11861186 nodes : []corev1.Node {
@@ -1208,9 +1208,9 @@ func TestFindTopologyAssignment(t *testing.T) {
12081208 requests : resources.Requests {
12091209 corev1 .ResourceCPU : 1000 ,
12101210 },
1211- count : 1 ,
1212- wantReason : "no topology domains at level: kubernetes.io/hostname" ,
1213- leastAllocated : true ,
1211+ count : 1 ,
1212+ wantReason : "no topology domains at level: kubernetes.io/hostname" ,
1213+ enableTASLeastAllocated : true ,
12141214 },
12151215 "no assignment as node is unschedulable; LeastAllocated" : {
12161216 nodes : []corev1.Node {
@@ -1235,9 +1235,9 @@ func TestFindTopologyAssignment(t *testing.T) {
12351235 requests : resources.Requests {
12361236 corev1 .ResourceCPU : 1000 ,
12371237 },
1238- count : 1 ,
1239- wantReason : "no topology domains at level: kubernetes.io/hostname" ,
1240- leastAllocated : true ,
1238+ count : 1 ,
1239+ wantReason : "no topology domains at level: kubernetes.io/hostname" ,
1240+ enableTASLeastAllocated : true ,
12411241 },
12421242 "skip node which has untolerated taint; LeastAllocated" : {
12431243 nodes : []corev1.Node {
@@ -1266,9 +1266,9 @@ func TestFindTopologyAssignment(t *testing.T) {
12661266 requests : resources.Requests {
12671267 corev1 .ResourceCPU : 1000 ,
12681268 },
1269- count : 1 ,
1270- wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
1271- leastAllocated : true ,
1269+ count : 1 ,
1270+ wantReason : `topology "default" doesn't allow to fit any of 1 pod(s)` ,
1271+ enableTASLeastAllocated : true ,
12721272 },
12731273 "allow to schedule on node with tolerated taint; LeastAllocated" : {
12741274 nodes : []corev1.Node {
@@ -1316,14 +1316,14 @@ func TestFindTopologyAssignment(t *testing.T) {
13161316 },
13171317 },
13181318 },
1319- leastAllocated : true ,
1319+ enableTASLeastAllocated : true ,
13201320 },
13211321 }
13221322 for name , tc := range cases {
13231323 t .Run (name , func (t * testing.T ) {
13241324 ctx := context .Background ()
13251325 // TODO: remove after dropping the TASLeastAllocated feature gate
1326- features .SetFeatureGateDuringTest (t , features .LeastAllocatedTAS , tc .leastAllocated )
1326+ features .SetFeatureGateDuringTest (t , features .TASLeastAllocated , tc .enableTASLeastAllocated )
13271327
13281328 initialObjects := make ([]client.Object , 0 )
13291329 for i := range tc .nodes {
0 commit comments