Skip to content

Commit 995e44f

Browse files
Pierre Tasci and tekton-robot
authored and committed
Prior to this commit, QPS, threads-per-controller, and maximum burst were not configurable when launching the Tekton controller. This meant that Tekton deployments could not be tuned to the environments they were deployed in.
This commit makes DefaultThreadsPerController, QPS, and Burst configurable via flags. This helps tune a deployment depending on expected concurrency in production.
1 parent 78f5479 commit 995e44f

File tree

3 files changed

+23
-3
lines changed

3 files changed

+23
-3
lines changed

cmd/controller/main.go

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,8 @@ import (
2525
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun"
2626
"github.com/tektoncd/pipeline/pkg/version"
2727
corev1 "k8s.io/api/core/v1"
28+
"k8s.io/client-go/rest"
29+
"knative.dev/pkg/controller"
2830
"knative.dev/pkg/injection"
2931
"knative.dev/pkg/injection/sharedmain"
3032
"knative.dev/pkg/signals"
@@ -48,6 +50,12 @@ var (
4850
imageDigestExporterImage = flag.String("imagedigest-exporter-image", "", "The container image containing our image digest exporter binary.")
4951
namespace = flag.String("namespace", corev1.NamespaceAll, "Namespace to restrict informer to. Optional, defaults to all namespaces.")
5052
versionGiven = flag.String("version", "devel", "Version of Tekton running")
53+
qps = flag.Int("kube-api-qps", int(rest.DefaultQPS), "Maximum QPS to the master from this client")
54+
burst = flag.Int("kube-api-burst", rest.DefaultBurst, "Maximum burst for throttle")
55+
threadsPerController = flag.Int("threads-per-controller", controller.DefaultThreadsPerController, "Threads (goroutines) to create per controller")
56+
disableHighAvailability = flag.Bool("disable-ha", false, "Whether to disable high-availability functionality for this component. This flag will be deprecated "+
57+
"and removed when we have promoted this feature to stable, so do not pass it without filing an "+
58+
"issue upstream!")
5159
)
5260

5361
func main() {
@@ -68,7 +76,18 @@ func main() {
6876
if err := images.Validate(); err != nil {
6977
log.Fatal(err)
7078
}
71-
sharedmain.MainWithContext(injection.WithNamespaceScope(signals.NewContext(), *namespace), ControllerLogKey,
79+
controller.DefaultThreadsPerController = *threadsPerController
80+
81+
cfg := sharedmain.ParseAndGetConfigOrDie()
82+
// multiply by 2, the number of controllers being created
83+
cfg.QPS = 2 * float32(*qps)
84+
cfg.Burst = 2 * *burst
85+
86+
ctx := injection.WithNamespaceScope(signals.NewContext(), *namespace)
87+
if !*disableHighAvailability {
88+
ctx = sharedmain.WithHADisabled(ctx)
89+
}
90+
sharedmain.MainWithConfig(ctx, ControllerLogKey, cfg,
7291
taskrun.NewController(*namespace, images),
7392
pipelinerun.NewController(*namespace, images),
7493
)

pkg/pod/entrypoint.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,7 @@ func StopSidecars(nopImage string, kubeclient kubernetes.Interface, pod corev1.P
209209
}
210210
if updated {
211211
if _, err := kubeclient.CoreV1().Pods(newPod.Namespace).Update(newPod); err != nil {
212-
return fmt.Errorf("error adding ready annotation to Pod %q: %w", pod.Name, err)
212+
return fmt.Errorf("error stopping sidecars of Pod %q: %w", pod.Name, err)
213213
}
214214
}
215215
return nil

pkg/reconciler/taskrun/taskrun.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg
122122
c.timeoutHandler.Release(tr.GetNamespacedName())
123123
pod, err := c.KubeClientSet.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
124124
if err == nil {
125+
logger.Debugf("Stopping sidecars for TaskRun %q of Pod %q", tr.Name, tr.Status.PodName)
125126
err = podconvert.StopSidecars(c.Images.NopImage, c.KubeClientSet, *pod)
126127
if err == nil {
127128
// Check if any SidecarStatuses are still shown as Running after stopping
@@ -504,7 +505,7 @@ func (c *Reconciler) handlePodCreationError(ctx context.Context, tr *v1beta1.Tas
504505
func (c *Reconciler) failTaskRun(ctx context.Context, tr *v1beta1.TaskRun, reason v1beta1.TaskRunReason, message string) error {
505506
logger := logging.FromContext(ctx)
506507

507-
logger.Warn("stopping task run %q because of %q", tr.Name, reason)
508+
logger.Warnf("stopping task run %q because of %q", tr.Name, reason)
508509
tr.Status.MarkResourceFailed(reason, errors.New(message))
509510

510511
completionTime := metav1.Time{Time: time.Now()}

0 commit comments

Comments
 (0)