Commit bc2ae0f

workqueue: drop @bind from create_worker()
Currently, create_worker()'s callers are responsible for deciding whether the newly created worker should be bound to the associated CPU and create_worker() sets WORKER_UNBOUND only for the workers for the unbound global_cwq. Creation during normal operation is always via maybe_create_worker() and @bind is true. For workers created during hotplug, @bind is false.

Normal operation path is planned to be used even while the CPU is going through hotplug operations or offline and this static decision won't work.

Drop @bind from create_worker() and decide whether to bind by looking at GCWQ_DISASSOCIATED. create_worker() will also set WORKER_UNBOUND automatically if disassociated. To avoid flipping GCWQ_DISASSOCIATED while create_worker() is in progress, the flag is now allowed to be changed only while holding all manager_mutexes on the global_cwq.

This requires that GCWQ_DISASSOCIATED is not cleared behind trustee's back. CPU_ONLINE no longer clears DISASSOCIATED before flushing trustee, which clears DISASSOCIATED before rebinding remaining workers if asked to release. For cases where trustee isn't around, CPU_ONLINE clears DISASSOCIATED after flushing trustee. Also, now, first_idle has UNBOUND set on creation which is explicitly cleared by CPU_ONLINE while binding it. These convolutions will soon be removed by further simplification of CPU hotplug path.

Signed-off-by: Tejun Heo <[email protected]>
Acked-by: "Rafael J. Wysocki" <[email protected]>
1 parent 6037315 commit bc2ae0f
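
Note on the locking rule described above: this patch only documents that GCWQ_DISASSOCIATED may change while managership of every pool on the gcwq is held; the hotplug-side code that actually claims those manager_mutexes arrives in later patches of the series. As a rough, hypothetical sketch of that contract (the helper name and locking order below are assumptions, not code from this commit), flipping the flag would look something like:

/*
 * Illustrative sketch only -- not part of this commit.  Assumes the
 * per-pool manager_mutex added by the parent commit and a hypothetical
 * helper name.
 */
static void gcwq_set_disassociated(struct global_cwq *gcwq, bool disassociated)
{
	struct worker_pool *pool;

	/* claim managership of every pool so no create_worker() can race */
	for_each_worker_pool(pool, gcwq)
		mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);

	spin_lock_irq(&gcwq->lock);
	if (disassociated)
		gcwq->flags |= GCWQ_DISASSOCIATED;
	else
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
	spin_unlock_irq(&gcwq->lock);

	for_each_worker_pool(pool, gcwq)
		mutex_unlock(&pool->manager_mutex);
}

With every manager_mutex held, no worker creation can be in flight, which is why create_worker() below can test gcwq->flags without further locking, per the comment it adds.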

File tree

1 file changed: +45, -19 lines

kernel/workqueue.c

Lines changed: 45 additions & 19 deletions
@@ -45,7 +45,22 @@
 #include "workqueue_sched.h"
 
 enum {
-	/* global_cwq flags */
+	/*
+	 * global_cwq flags
+	 *
+	 * A bound gcwq is either associated or disassociated with its CPU.
+	 * While associated (!DISASSOCIATED), all workers are bound to the
+	 * CPU and none has %WORKER_UNBOUND set and concurrency management
+	 * is in effect.
+	 *
+	 * While DISASSOCIATED, the cpu may be offline and all workers have
+	 * %WORKER_UNBOUND set and concurrency management disabled, and may
+	 * be executing on any CPU.  The gcwq behaves as an unbound one.
+	 *
+	 * Note that DISASSOCIATED can be flipped only while holding
+	 * managership of all pools on the gcwq to avoid changing binding
+	 * state while create_worker() is in progress.
+	 */
 	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
 	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */
 
@@ -1334,7 +1349,6 @@ static struct worker *alloc_worker(void)
 /**
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
- * @bind: whether to set affinity to @cpu or not
  *
  * Create a new worker which is bound to @pool.  The returned worker
  * can be started by calling start_worker() or destroyed using
@@ -1346,10 +1360,9 @@ static struct worker *alloc_worker(void)
  * RETURNS:
  * Pointer to the newly created worker.
  */
-static struct worker *create_worker(struct worker_pool *pool, bool bind)
+static struct worker *create_worker(struct worker_pool *pool)
 {
 	struct global_cwq *gcwq = pool->gcwq;
-	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
 	const char *pri = worker_pool_pri(pool) ? "H" : "";
 	struct worker *worker = NULL;
 	int id = -1;
@@ -1370,7 +1383,7 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 	worker->pool = pool;
 	worker->id = id;
 
-	if (!on_unbound_cpu)
+	if (gcwq->cpu != WORK_CPU_UNBOUND)
 		worker->task = kthread_create_on_node(worker_thread,
 					worker, cpu_to_node(gcwq->cpu),
 					"kworker/%u:%d%s", gcwq->cpu, id, pri);
@@ -1384,15 +1397,19 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 		set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
 	/*
-	 * An unbound worker will become a regular one if CPU comes online
-	 * later on.  Make sure every worker has PF_THREAD_BOUND set.
+	 * Determine CPU binding of the new worker depending on
+	 * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
+	 * flag remains stable across this function.  See the comments
+	 * above the flag definition for details.
+	 *
+	 * As an unbound worker may later become a regular one if CPU comes
+	 * online, make sure every worker has %PF_THREAD_BOUND set.
 	 */
-	if (bind && !on_unbound_cpu)
+	if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
 		kthread_bind(worker->task, gcwq->cpu);
-	else {
+	} else {
 		worker->task->flags |= PF_THREAD_BOUND;
-		if (on_unbound_cpu)
-			worker->flags |= WORKER_UNBOUND;
+		worker->flags |= WORKER_UNBOUND;
 	}
 
 	return worker;
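
Aside, not part of the diff: the new comment in create_worker() makes the caller responsible for keeping GCWQ_DISASSOCIATED stable across the call. Because the flag can only change while all manager_mutexes on the gcwq are held, holding a single pool's manager_mutex is enough. A minimal caller-side sketch, assuming a manage_workers()-style context and not quoting kernel code verbatim:

	/* the pool's manager_mutex keeps GCWQ_DISASSOCIATED stable here */
	mutex_lock(&pool->manager_mutex);

	worker = create_worker(pool);	/* binding decided from GCWQ_DISASSOCIATED */
	if (worker) {
		spin_lock_irq(&gcwq->lock);
		start_worker(worker);	/* start_worker() needs gcwq->lock */
		spin_unlock_irq(&gcwq->lock);
	}

	mutex_unlock(&pool->manager_mutex);

The hunk just below is maybe_create_worker(), the normal-operation instance of this path; the remaining call sites only lose the @bind argument.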
@@ -1568,7 +1585,7 @@ __acquires(&gcwq->lock)
 	while (true) {
 		struct worker *worker;
 
-		worker = create_worker(pool, true);
+		worker = create_worker(pool);
 		if (worker) {
 			del_timer_sync(&pool->mayday_timer);
 			spin_lock_irq(&gcwq->lock);
@@ -3420,12 +3437,10 @@ static int __cpuinit trustee_thread(void *__gcwq)
 
 			if (need_to_create_worker(pool)) {
 				spin_unlock_irq(&gcwq->lock);
-				worker = create_worker(pool, false);
+				worker = create_worker(pool);
 				spin_lock_irq(&gcwq->lock);
-				if (worker) {
-					worker->flags |= WORKER_UNBOUND;
+				if (worker)
 					start_worker(worker);
-				}
 			}
 		}
 
@@ -3463,6 +3478,10 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	for_each_worker_pool(pool, gcwq)
 		WARN_ON(!list_empty(&pool->idle_list));
 
+	/* if we're reassociating, clear DISASSOCIATED */
+	if (gcwq->trustee_state == TRUSTEE_RELEASE)
+		gcwq->flags &= ~GCWQ_DISASSOCIATED;
+
 	for_each_busy_worker(worker, i, pos, gcwq) {
 		struct work_struct *rebind_work = &worker->rebind_work;
 
@@ -3546,7 +3565,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		i = 0;
 		for_each_worker_pool(pool, gcwq) {
 			BUG_ON(pool->first_idle);
-			new_workers[i] = create_worker(pool, false);
+			new_workers[i] = create_worker(pool);
 			if (!new_workers[i++])
 				goto err_destroy;
 		}
@@ -3584,13 +3603,19 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		gcwq->flags &= ~GCWQ_DISASSOCIATED;
 		if (gcwq->trustee_state != TRUSTEE_DONE) {
 			gcwq->trustee_state = TRUSTEE_RELEASE;
 			wake_up_process(gcwq->trustee);
 			wait_trustee_state(gcwq, TRUSTEE_DONE);
 		}
 
+		/*
+		 * Either DISASSOCIATED is already cleared or no worker is
+		 * left on the gcwq.  Safe to clear DISASSOCIATED without
+		 * claiming managers.
+		 */
+		gcwq->flags &= ~GCWQ_DISASSOCIATED;
+
 		/*
 		 * Trustee is done and there might be no worker left.
 		 * Put the first_idle in and request a real manager to
@@ -3601,6 +3626,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			kthread_bind(pool->first_idle->task, cpu);
 			spin_lock_irq(&gcwq->lock);
 			pool->flags |= POOL_MANAGE_WORKERS;
+			pool->first_idle->flags &= ~WORKER_UNBOUND;
 			start_worker(pool->first_idle);
 			pool->first_idle = NULL;
 		}
@@ -3899,7 +3925,7 @@ static int __init init_workqueues(void)
 		for_each_worker_pool(pool, gcwq) {
 			struct worker *worker;
 
-			worker = create_worker(pool, true);
+			worker = create_worker(pool);
 			BUG_ON(!worker);
 			spin_lock_irq(&gcwq->lock);
 			start_worker(worker);
