/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.  A short
 * illustrative sketch of the user-facing API follows the includes below.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

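/*
 * Illustrative sketch only, not part of the original file: the typical
 * user of this mechanism.  example_fn, example_work and example_submit
 * are hypothetical names.
 */
#if 0
static void example_fn(struct work_struct *work)
{
	pr_info("ran in process context on a shared worker\n");
}

static DECLARE_WORK(example_work, example_fn);

static void example_submit(void)
{
	/* queued on the shared per-cpu pool of the submitting CPU */
	queue_work(system_wq, &example_work);
}
#endif
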
enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give them nice level -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};
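
/*
 * Worked example of the mayday timings above: with HZ = 1000,
 * MAYDAY_INITIAL_TIMEOUT is max(1000 / 100, 2) = 10 ticks (10ms) and
 * MAYDAY_INTERVAL is 1000 / 10 = 100 ticks (100ms).  With HZ = 100 the
 * two-tick floor kicks in: 100 / 100 = 1 < 2, so the initial timeout
 * becomes 2 ticks (20ms).
 */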

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};

/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned to a (1 << WORK_STRUCT_FLAG_BITS) byte boundary.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
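
/*
 * Rough guide to the system workqueues above (workqueue.h and
 * Documentation/workqueue.txt carry the authoritative descriptions):
 * system_wq backs schedule_work() and suits short works, system_long_wq
 * is for works which may take a while, system_nrt_wq guarantees
 * non-reentrance, system_unbound_wq skips CPU binding and concurrency
 * management, and system_freezable_wq is frozen during suspend.
 */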

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

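/*
 * __next_gcwq_cpu() returns the next CPU to visit when iterating gcwqs.
 * @sw selects what is included: bit 0 the CPUs in @mask, bit 1 the
 * special WORK_CPU_UNBOUND gcwq.  WORK_CPU_NONE terminates iteration.
 */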
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
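
/*
 * Illustrative sketch only, not part of the original file: on-stack work
 * items pair INIT_WORK_ONSTACK() with destroy_work_on_stack() so the
 * debug-objects state tracked above stays consistent.  The names are
 * hypothetical.
 */
#if 0
static void example_onstack_fn(struct work_struct *work)
{
}

static void example_onstack(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, example_onstack_fn);
	schedule_work(&work);
	flush_work(&work);	/* must finish before the frame dies */
	destroy_work_on_stack(&work);
}
#endif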

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(gcwq_nr_running, cpu);
	else
		return &unbound_gcwq_nr_running;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids)) {
#ifdef CONFIG_SMP
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
#else
			return wq->cpu_wq.single;
#endif
		}
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
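
/*
 * Example: flush colors simply cycle, so with WORK_NR_COLORS == 15 (the
 * value implied by the 4 color bits in workqueue.h of this vintage)
 * work_next_color() yields 0, 1, ..., 14, 0, ...
 */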

/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}

/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 */
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	return gcwq->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

	return !list_empty(&gcwq->worklist) &&
		(atomic_read(nr_running) <= 1 ||
		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
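
/*
 * Worked example: with MAX_IDLE_WORKERS_RATIO = 4 and 16 busy workers,
 * 5 idle workers are still tolerated; a 6th makes (6 - 2) * 4 >= 16
 * true and the idle timer starts retiring workers.
 */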

/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING))
		atomic_inc(get_gcwq_nr_running(cpu));
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);

	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that no one else
	 * could be manipulating idle_list, so dereferencing idle_list
	 * without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}

/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}
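
/*
 * Example of the ordering above: queueing HIGHPRI work H3 onto a
 * worklist [H1, H2, N1, N2] (H* highpri, N* normal) yields
 * [H1, H2, H3, N1, N2] - head of the queue, but FIFO among HIGHPRI
 * works.
 */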

/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.  This is rather expensive and should only be used from
 * cold paths.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	unsigned long flags;
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;
		struct hlist_node *pos;
		int i;

		spin_lock_irqsave(&gcwq->lock, flags);
		for_each_busy_worker(worker, i, pos, gcwq) {
			if (worker->task != current)
				continue;
			spin_unlock_irqrestore(&gcwq->lock, flags);
			/*
			 * I'm @worker, no locking necessary.  See if @work
			 * is headed to the same workqueue.
			 */
			return worker->current_cwq->wq == wq;
		}
		spin_unlock_irqrestore(&gcwq->lock, flags);
	}
	return false;
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned long flags;

	debug_work_activate(work);

	/* if dying, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * This wq is per-cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);
	trace_workqueue_queue_work(cpu, cwq, work);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		trace_workqueue_activate_work(work);
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
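
/*
 * Illustrative sketch only, not part of the original file: pinning a
 * work item to a known-online CPU.  stats_fn, stats_work and kick_stats
 * are hypothetical names.
 */
#if 0
static void stats_fn(struct work_struct *work)
{
	/* runs on CPU 0 for as long as CPU 0 stays online */
}

static DECLARE_WORK(stats_work, stats_fn);

static void kick_stats(void)
{
	queue_work_on(0, system_wq, &stats_work);
}
#endif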

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
		       struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			  struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
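
/*
 * Illustrative sketch only, not part of the original file: a
 * self-rearming delayed work, the common polling pattern built on
 * queue_delayed_work().  poll_fn, poll_work and poll_start are
 * hypothetical names.
 */
#if 0
static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... do the periodic work ... */
	queue_delayed_work(system_wq, &poll_work, HZ);	/* again in 1s */
}

static void poll_start(void)
{
	queue_delayed_work(system_wq, &poll_work, HZ);
}
#endif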

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
			mod_timer(&gcwq->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		wake_up_all(&gcwq->trustee_wait);

	/* sanity check nr_running */
	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to an already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed_ptr() and locks gcwq and verifies
 * the binding against GCWQ_DISASSOCIATED which is set during
 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
 * idle state or fetches works without dropping lock, it can guarantee
 * the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/*
		 * We've raced with CPU hot[un]plug.  Give it a breather
		 * and retry migration.  cond_resched() is required here;
		 * otherwise, we might deadlock against cpu_stop trying to
		 * bring down the CPU on a non-preemptive kernel.
		 */
		cpu_relax();
		cond_resched();
	}
}

/*
 * Function for worker->rebind_work used to rebind rogue busy workers
 * to the associated cpu which is coming back online.  This is
 * scheduled by cpu up but can race with other cpu hotplug operations
 * and may be executed twice without intervening cpu down.
 */
static void worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->gcwq;

	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_REBIND);

	spin_unlock_irq(&gcwq->lock);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @gcwq: gcwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @gcwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
	struct worker *worker = NULL;
	int id = -1;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->id = id;

	if (!on_unbound_cpu)
		worker->task = kthread_create_on_node(worker_thread,
						      worker,
						      cpu_to_node(gcwq->cpu),
						      "kworker/%u:%d", gcwq->cpu, id);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d", id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if its CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind && !on_unbound_cpu)
		kthread_bind(worker->task, gcwq->cpu);
	else {
		worker->task->flags |= PF_THREAD_BOUND;
		if (on_unbound_cpu)
			worker->flags |= WORKER_UNBOUND;
	}

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}

static void idle_worker_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;

	spin_lock_irq(&gcwq->lock);

	if (too_many_workers(gcwq)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires))
			mod_timer(&gcwq->idle_timer, expires);
		else {
			/* it's been idle for too long, wake up manager */
			gcwq->flags |= GCWQ_MANAGE_WORKERS;
			wake_up_worker(gcwq);
		}
	}

	spin_unlock_irq(&gcwq->lock);
}

static bool send_mayday(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct workqueue_struct *wq = cwq->wq;
	unsigned int cpu;

	if (!(wq->flags & WQ_RESCUER))
		return false;

	/* mayday mayday mayday */
	cpu = cwq->gcwq->cpu;
	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
	if (cpu == WORK_CPU_UNBOUND)
		cpu = 0;
	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
		wake_up_process(wq->rescuer->task);
	return true;
}

static void gcwq_mayday_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;
	struct work_struct *work;

	spin_lock_irq(&gcwq->lock);

	if (need_to_create_worker(gcwq)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful.  We might be hitting an
		 * allocation deadlock.  Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &gcwq->worklist, entry)
			send_mayday(work);
	}

	spin_unlock_irq(&gcwq->lock);

	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
}

/**
 * maybe_create_worker - create a new worker if necessary
 * @gcwq: gcwq to create a new worker for
 *
 * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
 * have at least one idle worker on return from this function.  If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @gcwq to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be false and
 * may_start_working() true.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.  Called only from
 * manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_create_worker(struct global_cwq *gcwq)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	if (!need_to_create_worker(gcwq))
		return false;
restart:
	spin_unlock_irq(&gcwq->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		struct worker *worker;

		worker = create_worker(gcwq, true);
		if (worker) {
			del_timer_sync(&gcwq->mayday_timer);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			BUG_ON(need_to_create_worker(gcwq));
			return true;
		}

		if (!need_to_create_worker(gcwq))
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(CREATE_COOLDOWN);

		if (!need_to_create_worker(gcwq))
			break;
	}

	del_timer_sync(&gcwq->mayday_timer);
	spin_lock_irq(&gcwq->lock);
	if (need_to_create_worker(gcwq))
		goto restart;
	return true;
}

/**
 * maybe_destroy_workers - destroy workers which have been idle for a while
 * @gcwq: gcwq to destroy workers for
 *
 * Destroy @gcwq workers which have been idle for longer than
 * IDLE_WORKER_TIMEOUT.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Called only from manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_destroy_workers(struct global_cwq *gcwq)
{
	bool ret = false;

	while (too_many_workers(gcwq)) {
		struct worker *worker;
		unsigned long expires;

		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&gcwq->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
		ret = true;
	}

	return ret;
}

/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage gcwq worker pool @worker belongs
 * to.  At any given time, there can be only zero or one manager per
 * gcwq.  The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return.  On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true if
 * some action was taken.
 */
static bool manage_workers(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	bool ret = false;

	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
		return ret;

	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(gcwq);
	ret |= maybe_create_worker(gcwq);

	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/*
	 * The trustee might be waiting to take over the manager
	 * position, tell it we're done.
	 */
	if (unlikely(gcwq->trustee))
		wake_up_all(&gcwq->trustee_wait);

	return ret;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);
	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);

	trace_workqueue_activate_work(work);
	move_linked_works(work, pos, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 * @delayed: for a delayed work
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
				 bool delayed)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;

	if (!delayed) {
		cwq->nr_active--;
		if (!list_empty(&cwq->delayed_works)) {
			/* one down, submit a delayed one */
			if (cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work, including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it; we need to take
	 * this into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu.  Check whether anyone is
	 * already processing the work.  If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	/*
	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
	 */
	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
						struct work_struct, entry);

		if (!list_empty(&gcwq->worklist) &&
		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
			wake_up_worker(gcwq);
		else
			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
	}

	/*
	 * CPU intensive works don't participate in concurrency
	 * management.  They're the scheduler's responsibility.
	 */
	if (unlikely(cpu_intensive))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

	spin_unlock_irq(&gcwq->lock);

	work_clear_pending(work);
	lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	trace_workqueue_execute_start(work);
	f(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* clear cpu intensive status */
	if (unlikely(cpu_intensive))
		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color, false);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The gcwq worker thread function.  There's a single dynamic pool of
1927 * these per cpu. These workers process all works regardless of
1928 * their specific target workqueue. The only exception is works which
1929 * belong to workqueues with a rescuer which will be explained in
1930 * rescuer_thread().
1931 */
1932 static int worker_thread(void *__worker)
1933 {
1934 struct worker *worker = __worker;
1935 struct global_cwq *gcwq = worker->gcwq;
1936
1937 /* tell the scheduler that this is a workqueue worker */
1938 worker->task->flags |= PF_WQ_WORKER;
1939 woke_up:
1940 spin_lock_irq(&gcwq->lock);
1941
1942 /* DIE can be set only while we're idle, checking here is enough */
1943 if (worker->flags & WORKER_DIE) {
1944 spin_unlock_irq(&gcwq->lock);
1945 worker->task->flags &= ~PF_WQ_WORKER;
1946 return 0;
1947 }
1948
1949 worker_leave_idle(worker);
1950 recheck:
1951 /* no more worker necessary? */
1952 if (!need_more_worker(gcwq))
1953 goto sleep;
1954
1955 /* do we need to manage? */
1956 if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1957 goto recheck;
1958
1959 /*
1960 * ->scheduled list can only be filled while a worker is
1961 * preparing to process a work or actually processing it.
1962 * Make sure nobody diddled with it while I was sleeping.
1963 */
1964 BUG_ON(!list_empty(&worker->scheduled));
1965
1966 /*
1967 * When control reaches this point, we're guaranteed to have
1968 * at least one idle worker or that someone else has already
1969 * assumed the manager role.
1970 */
1971 worker_clr_flags(worker, WORKER_PREP);
1972
1973 do {
1974 struct work_struct *work =
1975 list_first_entry(&gcwq->worklist,
1976 struct work_struct, entry);
1977
1978 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1979 /* optimization path, not strictly necessary */
1980 process_one_work(worker, work);
1981 if (unlikely(!list_empty(&worker->scheduled)))
1982 process_scheduled_works(worker);
1983 } else {
1984 move_linked_works(work, &worker->scheduled, NULL);
1985 process_scheduled_works(worker);
1986 }
1987 } while (keep_working(gcwq));
1988
1989 worker_set_flags(worker, WORKER_PREP, false);
1990 sleep:
1991 if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1992 goto recheck;
1993
1994 /*
1995 * gcwq->lock is held and there's no work to process and no
1996 * need to manage, sleep. Workers are woken up only while
1997 * holding gcwq->lock or from local cpu, so setting the
1998 * current state before releasing gcwq->lock is enough to
1999 * prevent losing any event.
2000 */
2001 worker_enter_idle(worker);
2002 __set_current_state(TASK_INTERRUPTIBLE);
2003 spin_unlock_irq(&gcwq->lock);
2004 schedule();
2005 goto woke_up;
2006 }
2007
2008 /**
2009 * rescuer_thread - the rescuer thread function
2010 * @__wq: the associated workqueue
2011 *
2012 * Workqueue rescuer thread function. There's one rescuer for each
2013 * workqueue which has WQ_RESCUER set.
2014 *
2015 * Regular work processing on a gcwq may block trying to create a new
2016 * worker, which uses a GFP_KERNEL allocation and has a slight chance
2017 * of developing into a deadlock if some works currently on the same
2018 * queue need to be processed to satisfy that GFP_KERNEL allocation.
2019 * This is the problem the rescuer solves.
2020 *
2021 * When such condition is possible, the gcwq summons rescuers of all
2022 * workqueues which have works queued on the gcwq and lets them process
2023 * those works so that forward progress can be guaranteed.
2024 *
2025 * This should happen rarely.
2026 */
2027 static int rescuer_thread(void *__wq)
2028 {
2029 struct workqueue_struct *wq = __wq;
2030 struct worker *rescuer = wq->rescuer;
2031 struct list_head *scheduled = &rescuer->scheduled;
2032 bool is_unbound = wq->flags & WQ_UNBOUND;
2033 unsigned int cpu;
2034
2035 set_user_nice(current, RESCUER_NICE_LEVEL);
2036 repeat:
2037 set_current_state(TASK_INTERRUPTIBLE);
2038
2039 if (kthread_should_stop())
2040 return 0;
2041
2042 /*
2043 * See whether any cpu is asking for help. Unbound
2044 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
2045 */
2046 for_each_mayday_cpu(cpu, wq->mayday_mask) {
2047 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2048 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2049 struct global_cwq *gcwq = cwq->gcwq;
2050 struct work_struct *work, *n;
2051
2052 __set_current_state(TASK_RUNNING);
2053 mayday_clear_cpu(cpu, wq->mayday_mask);
2054
2055 /* migrate to the target cpu if possible */
2056 rescuer->gcwq = gcwq;
2057 worker_maybe_bind_and_lock(rescuer);
2058
2059 /*
2060 * Slurp in all works issued via this workqueue and
2061 * process'em.
2062 */
2063 BUG_ON(!list_empty(&rescuer->scheduled));
2064 list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2065 if (get_work_cwq(work) == cwq)
2066 move_linked_works(work, scheduled, &n);
2067
2068 process_scheduled_works(rescuer);
2069
2070 /*
2071 * Leave this gcwq. If keep_working() is %true, notify a
2072 * regular worker; otherwise, we end up with 0 concurrency
2073 * and stall the execution.
2074 */
2075 if (keep_working(gcwq))
2076 wake_up_worker(gcwq);
2077
2078 spin_unlock_irq(&gcwq->lock);
2079 }
2080
2081 schedule();
2082 goto repeat;
2083 }
2084
2085 struct wq_barrier {
2086 struct work_struct work;
2087 struct completion done;
2088 };
2089
2090 static void wq_barrier_func(struct work_struct *work)
2091 {
2092 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2093 complete(&barr->done);
2094 }
2095
2096 /**
2097 * insert_wq_barrier - insert a barrier work
2098 * @cwq: cwq to insert barrier into
2099 * @barr: wq_barrier to insert
2100 * @target: target work to attach @barr to
2101 * @worker: worker currently executing @target, NULL if @target is not executing
2102 *
2103 * @barr is linked to @target such that @barr is completed only after
2104 * @target finishes execution. Please note that the ordering
2105 * guarantee is observed only with respect to @target and on the local
2106 * cpu.
2107 *
2108 * Currently, a queued barrier can't be canceled. This is because
2109 * try_to_grab_pending() can't determine whether the work to be
2110 * grabbed is at the head of the queue and thus can't clear LINKED
2111 * flag of the previous work while there must be a valid next work
2112 * after a work with LINKED flag set.
2113 *
2114 * Note that when @worker is non-NULL, @target may be modified
2115 * underneath us, so we can't reliably determine cwq from @target.
2116 *
2117 * CONTEXT:
2118 * spin_lock_irq(gcwq->lock).
2119 */
2120 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2121 struct wq_barrier *barr,
2122 struct work_struct *target, struct worker *worker)
2123 {
2124 struct list_head *head;
2125 unsigned int linked = 0;
2126
2127 /*
2128 * debugobject calls are safe here even with gcwq->lock locked
2129 * as we know for sure that this will not trigger any of the
2130 * checks and call back into the fixup functions where we
2131 * might deadlock.
2132 */
2133 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2134 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2135 init_completion(&barr->done);
2136
2137 /*
2138 * If @target is currently being executed, schedule the
2139 * barrier to the worker; otherwise, put it after @target.
2140 */
2141 if (worker)
2142 head = worker->scheduled.next;
2143 else {
2144 unsigned long *bits = work_data_bits(target);
2145
2146 head = target->entry.next;
2147 /* there can already be other linked works, inherit and set */
2148 linked = *bits & WORK_STRUCT_LINKED;
2149 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2150 }
2151
2152 debug_work_activate(&barr->work);
2153 insert_work(cwq, &barr->work, head,
2154 work_color_to_flags(WORK_NO_COLOR) | linked);
2155 }
2156
2157 /**
2158 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2159 * @wq: workqueue being flushed
2160 * @flush_color: new flush color, < 0 for no-op
2161 * @work_color: new work color, < 0 for no-op
2162 *
2163 * Prepare cwqs for workqueue flushing.
2164 *
2165 * If @flush_color is non-negative, flush_color on all cwqs should be
2166 * -1. If no cwq has in-flight works at the specified color, all
2167 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
2168 * has in-flight works, its cwq->flush_color is set to
2169 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2170 * wakeup logic is armed and %true is returned.
2171 *
2172 * The caller should have initialized @wq->first_flusher prior to
2173 * calling this function with non-negative @flush_color. If
2174 * @flush_color is negative, no flush color update is done and %false
2175 * is returned.
2176 *
2177 * If @work_color is non-negative, all cwqs should have the same
2178 * work_color which is previous to @work_color and all will be
2179 * advanced to @work_color.
2180 *
2181 * CONTEXT:
2182 * mutex_lock(wq->flush_mutex).
2183 *
2184 * RETURNS:
2185 * %true if @flush_color >= 0 and there's something to flush. %false
2186 * otherwise.
2187 */
2188 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2189 int flush_color, int work_color)
2190 {
2191 bool wait = false;
2192 unsigned int cpu;
2193
2194 if (flush_color >= 0) {
2195 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2196 atomic_set(&wq->nr_cwqs_to_flush, 1);
2197 }
2198
2199 for_each_cwq_cpu(cpu, wq) {
2200 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2201 struct global_cwq *gcwq = cwq->gcwq;
2202
2203 spin_lock_irq(&gcwq->lock);
2204
2205 if (flush_color >= 0) {
2206 BUG_ON(cwq->flush_color != -1);
2207
2208 if (cwq->nr_in_flight[flush_color]) {
2209 cwq->flush_color = flush_color;
2210 atomic_inc(&wq->nr_cwqs_to_flush);
2211 wait = true;
2212 }
2213 }
2214
2215 if (work_color >= 0) {
2216 BUG_ON(work_color != work_next_color(cwq->work_color));
2217 cwq->work_color = work_color;
2218 }
2219
2220 spin_unlock_irq(&gcwq->lock);
2221 }
2222
2223 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2224 complete(&wq->first_flusher->done);
2225
2226 return wait;
2227 }
2228
2229 /**
2230 * flush_workqueue - ensure that any scheduled work has run to completion.
2231 * @wq: workqueue to flush
2232 *
2233 * Forces execution of the workqueue and blocks until its completion.
2234 * This is typically used in driver shutdown handlers.
2235 *
2236 * We sleep until all works which were queued on entry have been handled,
2237 * but we are not livelocked by new incoming ones.
2238 */
2239 void flush_workqueue(struct workqueue_struct *wq)
2240 {
2241 struct wq_flusher this_flusher = {
2242 .list = LIST_HEAD_INIT(this_flusher.list),
2243 .flush_color = -1,
2244 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2245 };
2246 int next_color;
2247
2248 lock_map_acquire(&wq->lockdep_map);
2249 lock_map_release(&wq->lockdep_map);
2250
2251 mutex_lock(&wq->flush_mutex);
2252
2253 /*
2254 * Start-to-wait phase
2255 */
2256 next_color = work_next_color(wq->work_color);
2257
2258 if (next_color != wq->flush_color) {
2259 /*
2260 * Color space is not full. The current work_color
2261 * becomes our flush_color and work_color is advanced
2262 * by one.
2263 */
2264 BUG_ON(!list_empty(&wq->flusher_overflow));
2265 this_flusher.flush_color = wq->work_color;
2266 wq->work_color = next_color;
2267
2268 if (!wq->first_flusher) {
2269 /* no flush in progress, become the first flusher */
2270 BUG_ON(wq->flush_color != this_flusher.flush_color);
2271
2272 wq->first_flusher = &this_flusher;
2273
2274 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2275 wq->work_color)) {
2276 /* nothing to flush, done */
2277 wq->flush_color = next_color;
2278 wq->first_flusher = NULL;
2279 goto out_unlock;
2280 }
2281 } else {
2282 /* wait in queue */
2283 BUG_ON(wq->flush_color == this_flusher.flush_color);
2284 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2285 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2286 }
2287 } else {
2288 /*
2289 * Oops, color space is full, wait on overflow queue.
2290 * The next flush completion will assign us
2291 * flush_color and transfer to flusher_queue.
2292 */
2293 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2294 }
2295
2296 mutex_unlock(&wq->flush_mutex);
2297
2298 wait_for_completion(&this_flusher.done);
2299
2300 /*
2301 * Wake-up-and-cascade phase
2302 *
2303 * First flushers are responsible for cascading flushes and
2304 * handling overflow. Non-first flushers can simply return.
2305 */
2306 if (wq->first_flusher != &this_flusher)
2307 return;
2308
2309 mutex_lock(&wq->flush_mutex);
2310
2311 /* we might have raced, check again with mutex held */
2312 if (wq->first_flusher != &this_flusher)
2313 goto out_unlock;
2314
2315 wq->first_flusher = NULL;
2316
2317 BUG_ON(!list_empty(&this_flusher.list));
2318 BUG_ON(wq->flush_color != this_flusher.flush_color);
2319
2320 while (true) {
2321 struct wq_flusher *next, *tmp;
2322
2323 /* complete all the flushers sharing the current flush color */
2324 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2325 if (next->flush_color != wq->flush_color)
2326 break;
2327 list_del_init(&next->list);
2328 complete(&next->done);
2329 }
2330
2331 BUG_ON(!list_empty(&wq->flusher_overflow) &&
2332 wq->flush_color != work_next_color(wq->work_color));
2333
2334 /* this flush_color is finished, advance by one */
2335 wq->flush_color = work_next_color(wq->flush_color);
2336
2337 /* one color has been freed, handle overflow queue */
2338 if (!list_empty(&wq->flusher_overflow)) {
2339 /*
2340 * Assign the same color to all overflowed
2341 * flushers, advance work_color and append to
2342 * flusher_queue. This is the start-to-wait
2343 * phase for these overflowed flushers.
2344 */
2345 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2346 tmp->flush_color = wq->work_color;
2347
2348 wq->work_color = work_next_color(wq->work_color);
2349
2350 list_splice_tail_init(&wq->flusher_overflow,
2351 &wq->flusher_queue);
2352 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2353 }
2354
2355 if (list_empty(&wq->flusher_queue)) {
2356 BUG_ON(wq->flush_color != wq->work_color);
2357 break;
2358 }
2359
2360 /*
2361 * Need to flush more colors. Make the next flusher
2362 * the new first flusher and arm cwqs.
2363 */
2364 BUG_ON(wq->flush_color == wq->work_color);
2365 BUG_ON(wq->flush_color != next->flush_color);
2366
2367 list_del_init(&next->list);
2368 wq->first_flusher = next;
2369
2370 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2371 break;
2372
2373 /*
2374 * Meh... this color is already done, clear first
2375 * flusher and repeat cascading.
2376 */
2377 wq->first_flusher = NULL;
2378 }
2379
2380 out_unlock:
2381 mutex_unlock(&wq->flush_mutex);
2382 }
2383 EXPORT_SYMBOL_GPL(flush_workqueue);
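
/*
 * Illustrative sketch, not part of the original file: a minimal
 * flush_workqueue() user. The workqueue, work item and function names
 * below are hypothetical, so the block is compiled out.
 */
#if 0
static void fill_cache_fn(struct work_struct *work)
{
	/* repopulate some cache; runs in process context */
}

static DECLARE_WORK(fill_cache_work, fill_cache_fn);

static void example_flush(struct workqueue_struct *my_wq)
{
	queue_work(my_wq, &fill_cache_work);

	/*
	 * Blocks until every work queued on @my_wq before this call,
	 * including fill_cache_work, has finished executing.
	 */
	flush_workqueue(my_wq);
}
#endif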
2384
2385 /**
2386 * drain_workqueue - drain a workqueue
2387 * @wq: workqueue to drain
2388 *
2389 * Wait until the workqueue becomes empty. While draining is in progress,
2390 * only chain queueing is allowed. IOW, only currently pending or running
2391 * work items on @wq can queue further work items on it. @wq is flushed
2392 * repeatedly until it becomes empty. The number of flushes is determined
2393 * by the depth of chaining and should be relatively short. Whine if it
2394 * takes too long.
2395 */
2396 void drain_workqueue(struct workqueue_struct *wq)
2397 {
2398 unsigned int flush_cnt = 0;
2399 unsigned int cpu;
2400
2401 /*
2402 * __queue_work() needs to test whether there are drainers; it is much
2403 * hotter than drain_workqueue() and already looks at @wq->flags.
2404 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2405 */
2406 spin_lock(&workqueue_lock);
2407 if (!wq->nr_drainers++)
2408 wq->flags |= WQ_DRAINING;
2409 spin_unlock(&workqueue_lock);
2410 reflush:
2411 flush_workqueue(wq);
2412
2413 for_each_cwq_cpu(cpu, wq) {
2414 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2415
2416 if (!cwq->nr_active && list_empty(&cwq->delayed_works))
2417 continue;
2418
2419 if (++flush_cnt == 10 ||
2420 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2421 pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2422 wq->name, flush_cnt);
2423 goto reflush;
2424 }
2425
2426 spin_lock(&workqueue_lock);
2427 if (!--wq->nr_drainers)
2428 wq->flags &= ~WQ_DRAINING;
2429 spin_unlock(&workqueue_lock);
2430 }
2431 EXPORT_SYMBOL_GPL(drain_workqueue);
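
/*
 * Illustrative sketch, not part of the original file: chain queueing
 * under drain_workqueue(). The names are hypothetical and the block is
 * compiled out.
 */
#if 0
static struct workqueue_struct *retry_wq;
static bool still_failing;

static void retry_fn(struct work_struct *work)
{
	/* chain queueing -- allowed even while @retry_wq is draining */
	if (still_failing)
		queue_work(retry_wq, work);
}

static DECLARE_WORK(retry_work, retry_fn);

static void example_drain(void)
{
	/*
	 * Unlike a single flush, this keeps flushing until retry_work
	 * stops requeueing itself and @retry_wq is genuinely empty.
	 */
	drain_workqueue(retry_wq);
}
#endif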
2432
2433 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2434 bool wait_executing)
2435 {
2436 struct worker *worker = NULL;
2437 struct global_cwq *gcwq;
2438 struct cpu_workqueue_struct *cwq;
2439
2440 might_sleep();
2441 gcwq = get_work_gcwq(work);
2442 if (!gcwq)
2443 return false;
2444
2445 spin_lock_irq(&gcwq->lock);
2446 if (!list_empty(&work->entry)) {
2447 /*
2448 * See the comment near try_to_grab_pending()->smp_rmb().
2449 * If it was re-queued to a different gcwq under us, we
2450 * are not going to wait.
2451 */
2452 smp_rmb();
2453 cwq = get_work_cwq(work);
2454 if (unlikely(!cwq || gcwq != cwq->gcwq))
2455 goto already_gone;
2456 } else if (wait_executing) {
2457 worker = find_worker_executing_work(gcwq, work);
2458 if (!worker)
2459 goto already_gone;
2460 cwq = worker->current_cwq;
2461 } else
2462 goto already_gone;
2463
2464 insert_wq_barrier(cwq, barr, work, worker);
2465 spin_unlock_irq(&gcwq->lock);
2466
2467 /*
2468 * If @max_active is 1 or rescuer is in use, flushing another work
2469 * item on the same workqueue may lead to deadlock. Make sure the
2470 * flusher is not running on the same workqueue by verifying write
2471 * access.
2472 */
2473 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2474 lock_map_acquire(&cwq->wq->lockdep_map);
2475 else
2476 lock_map_acquire_read(&cwq->wq->lockdep_map);
2477 lock_map_release(&cwq->wq->lockdep_map);
2478
2479 return true;
2480 already_gone:
2481 spin_unlock_irq(&gcwq->lock);
2482 return false;
2483 }
2484
2485 /**
2486 * flush_work - wait for a work to finish executing the last queueing instance
2487 * @work: the work to flush
2488 *
2489 * Wait until @work has finished execution. This function considers
2490 * only the last queueing instance of @work. If @work has been
2491 * enqueued across different CPUs on a non-reentrant workqueue or on
2492 * multiple workqueues, @work might still be executing on return on
2493 * some of the CPUs from earlier queueing.
2494 *
2495 * If @work was queued only on a non-reentrant, ordered or unbound
2496 * workqueue, @work is guaranteed to be idle on return if it hasn't
2497 * been requeued since flush started.
2498 *
2499 * RETURNS:
2500 * %true if flush_work() waited for the work to finish execution,
2501 * %false if it was already idle.
2502 */
2503 bool flush_work(struct work_struct *work)
2504 {
2505 struct wq_barrier barr;
2506
2507 if (start_flush_work(work, &barr, true)) {
2508 wait_for_completion(&barr.done);
2509 destroy_work_on_stack(&barr.work);
2510 return true;
2511 } else
2512 return false;
2513 }
2514 EXPORT_SYMBOL_GPL(flush_work);
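
/*
 * Illustrative sketch, not part of the original file: waiting for the
 * last queueing instance with flush_work(). Hypothetical names,
 * compiled out.
 */
#if 0
static void update_stats_fn(struct work_struct *work)
{
	/* ... */
}

static DECLARE_WORK(update_stats_work, update_stats_fn);

static void example_flush_work(void)
{
	schedule_work(&update_stats_work);

	/* %true if we actually had to wait, %false if already idle */
	if (flush_work(&update_stats_work))
		pr_info("waited for update_stats_work\n");
}
#endif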
2515
2516 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2517 {
2518 struct wq_barrier barr;
2519 struct worker *worker;
2520
2521 spin_lock_irq(&gcwq->lock);
2522
2523 worker = find_worker_executing_work(gcwq, work);
2524 if (unlikely(worker))
2525 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2526
2527 spin_unlock_irq(&gcwq->lock);
2528
2529 if (unlikely(worker)) {
2530 wait_for_completion(&barr.done);
2531 destroy_work_on_stack(&barr.work);
2532 return true;
2533 } else
2534 return false;
2535 }
2536
2537 static bool wait_on_work(struct work_struct *work)
2538 {
2539 bool ret = false;
2540 int cpu;
2541
2542 might_sleep();
2543
2544 lock_map_acquire(&work->lockdep_map);
2545 lock_map_release(&work->lockdep_map);
2546
2547 for_each_gcwq_cpu(cpu)
2548 ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2549 return ret;
2550 }
2551
2552 /**
2553 * flush_work_sync - wait until a work has finished execution
2554 * @work: the work to flush
2555 *
2556 * Wait until @work has finished execution. On return, it's
2557 * guaranteed that all queueing instances of @work which happened
2558 * before this function is called are finished. In other words, if
2559 * @work hasn't been requeued since this function was called, @work is
2560 * guaranteed to be idle on return.
2561 *
2562 * RETURNS:
2563 * %true if flush_work_sync() waited for the work to finish execution,
2564 * %false if it was already idle.
2565 */
2566 bool flush_work_sync(struct work_struct *work)
2567 {
2568 struct wq_barrier barr;
2569 bool pending, waited;
2570
2571 /* we'll wait for executions separately, queue barr only if pending */
2572 pending = start_flush_work(work, &barr, false);
2573
2574 /* wait for executions to finish */
2575 waited = wait_on_work(work);
2576
2577 /* wait for the pending one */
2578 if (pending) {
2579 wait_for_completion(&barr.done);
2580 destroy_work_on_stack(&barr.work);
2581 }
2582
2583 return pending || waited;
2584 }
2585 EXPORT_SYMBOL_GPL(flush_work_sync);
2586
2587 /*
2588 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2589 * so this work can't be re-armed in any way.
2590 */
2591 static int try_to_grab_pending(struct work_struct *work)
2592 {
2593 struct global_cwq *gcwq;
2594 int ret = -1;
2595
2596 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2597 return 0;
2598
2599 /*
2600 * The queueing is in progress, or it is already queued. Try to
2601 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2602 */
2603 gcwq = get_work_gcwq(work);
2604 if (!gcwq)
2605 return ret;
2606
2607 spin_lock_irq(&gcwq->lock);
2608 if (!list_empty(&work->entry)) {
2609 /*
2610 * This work is queued, but perhaps we locked the wrong gcwq.
2611 * In that case we must see the new value after rmb(), see
2612 * insert_work()->wmb().
2613 */
2614 smp_rmb();
2615 if (gcwq == get_work_gcwq(work)) {
2616 debug_work_deactivate(work);
2617 list_del_init(&work->entry);
2618 cwq_dec_nr_in_flight(get_work_cwq(work),
2619 get_work_color(work),
2620 *work_data_bits(work) & WORK_STRUCT_DELAYED);
2621 ret = 1;
2622 }
2623 }
2624 spin_unlock_irq(&gcwq->lock);
2625
2626 return ret;
2627 }
2628
2629 static bool __cancel_work_timer(struct work_struct *work,
2630 struct timer_list* timer)
2631 {
2632 int ret;
2633
2634 do {
2635 ret = (timer && likely(del_timer(timer)));
2636 if (!ret)
2637 ret = try_to_grab_pending(work);
2638 wait_on_work(work);
2639 } while (unlikely(ret < 0));
2640
2641 clear_work_data(work);
2642 return ret;
2643 }
2644
2645 /**
2646 * cancel_work_sync - cancel a work and wait for it to finish
2647 * @work: the work to cancel
2648 *
2649 * Cancel @work and wait for its execution to finish. This function
2650 * can be used even if the work re-queues itself or migrates to
2651 * another workqueue. On return from this function, @work is
2652 * guaranteed to be not pending or executing on any CPU.
2653 *
2654 * cancel_work_sync(&delayed_work->work) must not be used for
2655 * delayed_work's. Use cancel_delayed_work_sync() instead.
2656 *
2657 * The caller must ensure that the workqueue on which @work was last
2658 * queued can't be destroyed before this function returns.
2659 *
2660 * RETURNS:
2661 * %true if @work was pending, %false otherwise.
2662 */
2663 bool cancel_work_sync(struct work_struct *work)
2664 {
2665 return __cancel_work_timer(work, NULL);
2666 }
2667 EXPORT_SYMBOL_GPL(cancel_work_sync);
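
/*
 * Illustrative sketch, not part of the original file: the usual
 * teardown pattern cancel_work_sync() enables. struct my_dev and
 * my_dev_remove() are hypothetical; the block is compiled out.
 */
#if 0
struct my_dev {
	struct work_struct reset_work;
	/* ... */
};

static void my_dev_remove(struct my_dev *dev)
{
	/*
	 * After this returns, reset_work is neither pending nor running
	 * anywhere, even if it used to requeue itself, so freeing @dev
	 * is safe.
	 */
	cancel_work_sync(&dev->reset_work);
	kfree(dev);
}
#endif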
2668
2669 /**
2670 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2671 * @dwork: the delayed work to flush
2672 *
2673 * Delayed timer is cancelled and the pending work is queued for
2674 * immediate execution. Like flush_work(), this function only
2675 * considers the last queueing instance of @dwork.
2676 *
2677 * RETURNS:
2678 * %true if flush_work() waited for the work to finish execution,
2679 * %false if it was already idle.
2680 */
2681 bool flush_delayed_work(struct delayed_work *dwork)
2682 {
2683 if (del_timer_sync(&dwork->timer))
2684 __queue_work(raw_smp_processor_id(),
2685 get_work_cwq(&dwork->work)->wq, &dwork->work);
2686 return flush_work(&dwork->work);
2687 }
2688 EXPORT_SYMBOL(flush_delayed_work);
2689
2690 /**
2691 * flush_delayed_work_sync - wait for a dwork to finish
2692 * @dwork: the delayed work to flush
2693 *
2694 * Delayed timer is cancelled and the pending work is queued for
2695 * execution immediately. Other than timer handling, its behavior
2696 * is identical to flush_work_sync().
2697 *
2698 * RETURNS:
2699 * %true if flush_work_sync() waited for the work to finish execution,
2700 * %false if it was already idle.
2701 */
2702 bool flush_delayed_work_sync(struct delayed_work *dwork)
2703 {
2704 if (del_timer_sync(&dwork->timer))
2705 __queue_work(raw_smp_processor_id(),
2706 get_work_cwq(&dwork->work)->wq, &dwork->work);
2707 return flush_work_sync(&dwork->work);
2708 }
2709 EXPORT_SYMBOL(flush_delayed_work_sync);
2710
2711 /**
2712 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2713 * @dwork: the delayed work to cancel
2714 *
2715 * This is cancel_work_sync() for delayed works.
2716 *
2717 * RETURNS:
2718 * %true if @dwork was pending, %false otherwise.
2719 */
2720 bool cancel_delayed_work_sync(struct delayed_work *dwork)
2721 {
2722 return __cancel_work_timer(&dwork->work, &dwork->timer);
2723 }
2724 EXPORT_SYMBOL(cancel_delayed_work_sync);
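
/*
 * Illustrative sketch, not part of the original file: a self-rearming
 * delayed work and its flush/cancel lifecycle. Hypothetical names,
 * compiled out.
 */
#if 0
static void poll_hw_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* poll the hardware, then rearm one second out */
	schedule_delayed_work(dwork, HZ);
}

static DECLARE_DELAYED_WORK(poll_hw_work, poll_hw_fn);

static void example_delayed(void)
{
	schedule_delayed_work(&poll_hw_work, HZ);

	/* don't wait out the timer -- execute the pending instance now */
	flush_delayed_work(&poll_hw_work);

	/* stop polling: kill the timer and wait for execution to finish */
	cancel_delayed_work_sync(&poll_hw_work);
}
#endif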
2725
2726 /**
2727 * schedule_work - put work task in global workqueue
2728 * @work: job to be done
2729 *
2730 * Returns zero if @work was already on the kernel-global workqueue and
2731 * non-zero otherwise.
2732 *
2733 * This puts a job in the kernel-global workqueue if it was not already
2734 * queued and leaves it in the same position on the kernel-global
2735 * workqueue otherwise.
2736 */
2737 int schedule_work(struct work_struct *work)
2738 {
2739 return queue_work(system_wq, work);
2740 }
2741 EXPORT_SYMBOL(schedule_work);
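
/*
 * Illustrative sketch, not part of the original file: the classic
 * deferral from interrupt to process context via schedule_work().
 * Hypothetical names; needs <linux/interrupt.h>; compiled out.
 */
#if 0
static void bottom_half_fn(struct work_struct *work)
{
	/* the part of the interrupt handling that may sleep goes here */
}

static DECLARE_WORK(bottom_half_work, bottom_half_fn);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/* can't sleep here -- punt to system_wq */
	schedule_work(&bottom_half_work);
	return IRQ_HANDLED;
}
#endif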
2742
2743 /**
2744 * schedule_work_on - put work task on a specific cpu
2745 * @cpu: cpu to put the work task on
2746 * @work: job to be done
2747 *
2748 * This puts a job on a specific cpu.
2749 */
2750 int schedule_work_on(int cpu, struct work_struct *work)
2751 {
2752 return queue_work_on(cpu, system_wq, work);
2753 }
2754 EXPORT_SYMBOL(schedule_work_on);
2755
2756 /**
2757 * schedule_delayed_work - put work task in global workqueue after delay
2758 * @dwork: job to be done
2759 * @delay: number of jiffies to wait or 0 for immediate execution
2760 *
2761 * After waiting for a given time this puts a job in the kernel-global
2762 * workqueue.
2763 */
2764 int schedule_delayed_work(struct delayed_work *dwork,
2765 unsigned long delay)
2766 {
2767 return queue_delayed_work(system_wq, dwork, delay);
2768 }
2769 EXPORT_SYMBOL(schedule_delayed_work);
2770
2771 /**
2772 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2773 * @cpu: cpu to use
2774 * @dwork: job to be done
2775 * @delay: number of jiffies to wait
2776 *
2777 * After waiting for a given time this puts a job in the kernel-global
2778 * workqueue on the specified CPU.
2779 */
2780 int schedule_delayed_work_on(int cpu,
2781 struct delayed_work *dwork, unsigned long delay)
2782 {
2783 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2784 }
2785 EXPORT_SYMBOL(schedule_delayed_work_on);
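
/*
 * Illustrative sketch, not part of the original file: a delayed work
 * targeted at a specific CPU. Hypothetical names, compiled out.
 */
#if 0
static void refresh_fn(struct work_struct *work)
{
	pr_info("refresh ran on cpu %d\n", smp_processor_id());
}

static DECLARE_DELAYED_WORK(refresh_work, refresh_fn);

static void example_delayed_on(void)
{
	/* run refresh_fn on CPU 1, roughly 500ms from now */
	schedule_delayed_work_on(1, &refresh_work, msecs_to_jiffies(500));
}
#endif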
2786
2787 /**
2788 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2789 * @func: the function to call
2790 *
2791 * schedule_on_each_cpu() executes @func on each online CPU using the
2792 * system workqueue and blocks until all CPUs have completed.
2793 * schedule_on_each_cpu() is very slow.
2794 *
2795 * RETURNS:
2796 * 0 on success, -errno on failure.
2797 */
2798 int schedule_on_each_cpu(work_func_t func)
2799 {
2800 int cpu;
2801 struct work_struct __percpu *works;
2802
2803 works = alloc_percpu(struct work_struct);
2804 if (!works)
2805 return -ENOMEM;
2806
2807 get_online_cpus();
2808
2809 for_each_online_cpu(cpu) {
2810 struct work_struct *work = per_cpu_ptr(works, cpu);
2811
2812 INIT_WORK(work, func);
2813 schedule_work_on(cpu, work);
2814 }
2815
2816 for_each_online_cpu(cpu)
2817 flush_work(per_cpu_ptr(works, cpu));
2818
2819 put_online_cpus();
2820 free_percpu(works);
2821 return 0;
2822 }
2823
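
/*
 * Illustrative sketch, not part of the original file: resetting a
 * per-cpu counter on every online CPU. Hypothetical names; needs
 * <linux/percpu.h>; compiled out.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, my_counter);

static void reset_counter_fn(struct work_struct *work)
{
	/* runs on each online CPU in turn, in process context */
	__this_cpu_write(my_counter, 0);
}

static int example_reset_all(void)
{
	/* blocks until every online CPU has run reset_counter_fn */
	return schedule_on_each_cpu(reset_counter_fn);
}
#endif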
2824 /**
2825 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2826 *
2827 * Forces execution of the kernel-global workqueue and blocks until its
2828 * completion.
2829 *
2830 * Think twice before calling this function! It's very easy to get into
2831 * trouble if you don't take great care. Either of the following situations
2832 * will lead to deadlock:
2833 *
2834 * One of the work items currently on the workqueue needs to acquire
2835 * a lock held by your code or its caller.
2836 *
2837 * Your code is running in the context of a work routine.
2838 *
2839 * They will be detected by lockdep when they occur, but the first might not
2840 * occur very often. It depends on what work items are on the workqueue and
2841 * what locks they need, which you have no control over.
2842 *
2843 * In most situations flushing the entire workqueue is overkill; you merely
2844 * need to know that a particular work item isn't queued and isn't running.
2845 * In such cases you should use cancel_delayed_work_sync() or
2846 * cancel_work_sync() instead.
2847 */
2848 void flush_scheduled_work(void)
2849 {
2850 flush_workqueue(system_wq);
2851 }
2852 EXPORT_SYMBOL(flush_scheduled_work);
2853
2854 /**
2855 * execute_in_process_context - reliably execute the routine with user context
2856 * @fn: the function to execute
2857 * @ew: guaranteed storage for the execute work structure (must
2858 * be available when the work executes)
2859 *
2860 * Executes the function immediately if process context is available,
2861 * otherwise schedules the function for delayed execution.
2862 *
2863 * Returns: 0 - function was executed
2864 * 1 - function was scheduled for execution
2865 */
2866 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2867 {
2868 if (!in_interrupt()) {
2869 fn(&ew->work);
2870 return 0;
2871 }
2872
2873 INIT_WORK(&ew->work, fn);
2874 schedule_work(&ew->work);
2875
2876 return 1;
2877 }
2878 EXPORT_SYMBOL_GPL(execute_in_process_context);
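
/*
 * Illustrative sketch, not part of the original file: running a
 * routine directly when possible and deferring it otherwise.
 * Hypothetical names, compiled out.
 */
#if 0
static struct execute_work cleanup_ew;

static void cleanup_fn(struct work_struct *work)
{
	/* may sleep; called directly or via the global workqueue */
}

static void example_cleanup(void)
{
	/* immediate in process context, schedule_work() from interrupt */
	execute_in_process_context(cleanup_fn, &cleanup_ew);
}
#endif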
2879
2880 int keventd_up(void)
2881 {
2882 return system_wq != NULL;
2883 }
2884
2885 static int alloc_cwqs(struct workqueue_struct *wq)
2886 {
2887 /*
2888 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
2889 * Make sure that the alignment isn't lower than that of
2890 * unsigned long long.
2891 */
2892 const size_t size = sizeof(struct cpu_workqueue_struct);
2893 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2894 __alignof__(unsigned long long));
2895 #ifdef CONFIG_SMP
2896 bool percpu = !(wq->flags & WQ_UNBOUND);
2897 #else
2898 bool percpu = false;
2899 #endif
2900
2901 if (percpu)
2902 wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2903 else {
2904 void *ptr;
2905
2906 /*
2907 * Allocate enough room to align cwq and put an extra
2908 * pointer at the end pointing back to the originally
2909 * allocated pointer which will be used for free.
2910 */
2911 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2912 if (ptr) {
2913 wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2914 *(void **)(wq->cpu_wq.single + 1) = ptr;
2915 }
2916 }
2917
2918 /* just in case, make sure it's actually aligned */
2919 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2920 return wq->cpu_wq.v ? 0 : -ENOMEM;
2921 }
2922
2923 static void free_cwqs(struct workqueue_struct *wq)
2924 {
2925 #ifdef CONFIG_SMP
2926 bool percpu = !(wq->flags & WQ_UNBOUND);
2927 #else
2928 bool percpu = false;
2929 #endif
2930
2931 if (percpu)
2932 free_percpu(wq->cpu_wq.pcpu);
2933 else if (wq->cpu_wq.single) {
2934 /* the pointer to free is stored right after the cwq */
2935 kfree(*(void **)(wq->cpu_wq.single + 1));
2936 }
2937 }
2938
2939 static int wq_clamp_max_active(int max_active, unsigned int flags,
2940 const char *name)
2941 {
2942 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2943
2944 if (max_active < 1 || max_active > lim)
2945 printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2946 "is out of range, clamping between %d and %d\n",
2947 max_active, name, 1, lim);
2948
2949 return clamp_val(max_active, 1, lim);
2950 }
2951
2952 struct workqueue_struct *__alloc_workqueue_key(const char *name,
2953 unsigned int flags,
2954 int max_active,
2955 struct lock_class_key *key,
2956 const char *lock_name)
2957 {
2958 struct workqueue_struct *wq;
2959 unsigned int cpu;
2960
2961 /*
2962 * Workqueues which may be used during memory reclaim should
2963 * have a rescuer to guarantee forward progress.
2964 */
2965 if (flags & WQ_MEM_RECLAIM)
2966 flags |= WQ_RESCUER;
2967
2968 /*
2969 * Unbound workqueues aren't concurrency managed and should be
2970 * dispatched to workers immediately.
2971 */
2972 if (flags & WQ_UNBOUND)
2973 flags |= WQ_HIGHPRI;
2974
2975 max_active = max_active ?: WQ_DFL_ACTIVE;
2976 max_active = wq_clamp_max_active(max_active, flags, name);
2977
2978 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
2979 if (!wq)
2980 goto err;
2981
2982 wq->flags = flags;
2983 wq->saved_max_active = max_active;
2984 mutex_init(&wq->flush_mutex);
2985 atomic_set(&wq->nr_cwqs_to_flush, 0);
2986 INIT_LIST_HEAD(&wq->flusher_queue);
2987 INIT_LIST_HEAD(&wq->flusher_overflow);
2988
2989 wq->name = name;
2990 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2991 INIT_LIST_HEAD(&wq->list);
2992
2993 if (alloc_cwqs(wq) < 0)
2994 goto err;
2995
2996 for_each_cwq_cpu(cpu, wq) {
2997 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2998 struct global_cwq *gcwq = get_gcwq(cpu);
2999
3000 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3001 cwq->gcwq = gcwq;
3002 cwq->wq = wq;
3003 cwq->flush_color = -1;
3004 cwq->max_active = max_active;
3005 INIT_LIST_HEAD(&cwq->delayed_works);
3006 }
3007
3008 if (flags & WQ_RESCUER) {
3009 struct worker *rescuer;
3010
3011 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3012 goto err;
3013
3014 wq->rescuer = rescuer = alloc_worker();
3015 if (!rescuer)
3016 goto err;
3017
3018 rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
3019 if (IS_ERR(rescuer->task))
3020 goto err;
3021
3022 rescuer->task->flags |= PF_THREAD_BOUND;
3023 wake_up_process(rescuer->task);
3024 }
3025
3026 /*
3027 * workqueue_lock protects global freeze state and workqueues
3028 * list. Grab it, set max_active accordingly and add the new
3029 * workqueue to workqueues list.
3030 */
3031 spin_lock(&workqueue_lock);
3032
3033 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3034 for_each_cwq_cpu(cpu, wq)
3035 get_cwq(cpu, wq)->max_active = 0;
3036
3037 list_add(&wq->list, &workqueues);
3038
3039 spin_unlock(&workqueue_lock);
3040
3041 return wq;
3042 err:
3043 if (wq) {
3044 free_cwqs(wq);
3045 free_mayday_mask(wq->mayday_mask);
3046 kfree(wq->rescuer);
3047 kfree(wq);
3048 }
3049 return NULL;
3050 }
3051 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
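
/*
 * Illustrative sketch, not part of the original file: the usual
 * alloc_workqueue()/destroy_workqueue() pairing through the wrapper
 * macro around __alloc_workqueue_key(). Hypothetical names, compiled
 * out.
 */
#if 0
static struct workqueue_struct *my_wq;

static int example_init(void)
{
	/*
	 * WQ_MEM_RECLAIM gives the queue a rescuer so it can make
	 * forward progress under memory pressure; max_active of 1
	 * limits it to one in-flight work per cpu.
	 */
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 1);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void example_exit(void)
{
	destroy_workqueue(my_wq);	/* drains pending works first */
}
#endif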
3052
3053 /**
3054 * destroy_workqueue - safely terminate a workqueue
3055 * @wq: target workqueue
3056 *
3057 * Safely destroy a workqueue. All work currently pending will be done first.
3058 */
3059 void destroy_workqueue(struct workqueue_struct *wq)
3060 {
3061 unsigned int cpu;
3062
3063 /* drain it before proceeding with destruction */
3064 drain_workqueue(wq);
3065
3066 /*
3067 * wq list is used to freeze wq, remove from list after
3068 * flushing is complete in case freeze races us.
3069 */
3070 spin_lock(&workqueue_lock);
3071 list_del(&wq->list);
3072 spin_unlock(&workqueue_lock);
3073
3074 /* sanity check */
3075 for_each_cwq_cpu(cpu, wq) {
3076 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3077 int i;
3078
3079 for (i = 0; i < WORK_NR_COLORS; i++)
3080 BUG_ON(cwq->nr_in_flight[i]);
3081 BUG_ON(cwq->nr_active);
3082 BUG_ON(!list_empty(&cwq->delayed_works));
3083 }
3084
3085 if (wq->flags & WQ_RESCUER) {
3086 kthread_stop(wq->rescuer->task);
3087 free_mayday_mask(wq->mayday_mask);
3088 kfree(wq->rescuer);
3089 }
3090
3091 free_cwqs(wq);
3092 kfree(wq);
3093 }
3094 EXPORT_SYMBOL_GPL(destroy_workqueue);
3095
3096 /**
3097 * workqueue_set_max_active - adjust max_active of a workqueue
3098 * @wq: target workqueue
3099 * @max_active: new max_active value.
3100 *
3101 * Set max_active of @wq to @max_active.
3102 *
3103 * CONTEXT:
3104 * Don't call from IRQ context.
3105 */
3106 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3107 {
3108 unsigned int cpu;
3109
3110 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3111
3112 spin_lock(&workqueue_lock);
3113
3114 wq->saved_max_active = max_active;
3115
3116 for_each_cwq_cpu(cpu, wq) {
3117 struct global_cwq *gcwq = get_gcwq(cpu);
3118
3119 spin_lock_irq(&gcwq->lock);
3120
3121 if (!(wq->flags & WQ_FREEZABLE) ||
3122 !(gcwq->flags & GCWQ_FREEZING))
3123 get_cwq(gcwq->cpu, wq)->max_active = max_active;
3124
3125 spin_unlock_irq(&gcwq->lock);
3126 }
3127
3128 spin_unlock(&workqueue_lock);
3129 }
3130 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3131
3132 /**
3133 * workqueue_congested - test whether a workqueue is congested
3134 * @cpu: CPU in question
3135 * @wq: target workqueue
3136 *
3137 * Test whether @wq's cpu workqueue for @cpu is congested. There is
3138 * no synchronization around this function and the test result is
3139 * unreliable and only useful as advisory hints or for debugging.
3140 *
3141 * RETURNS:
3142 * %true if congested, %false otherwise.
3143 */
3144 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3145 {
3146 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3147
3148 return !list_empty(&cwq->delayed_works);
3149 }
3150 EXPORT_SYMBOL_GPL(workqueue_congested);
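
/*
 * Illustrative sketch, not part of the original file: using the
 * congestion test purely as a hint. Hypothetical names, compiled out.
 */
#if 0
static void example_submit(struct workqueue_struct *wq,
			   struct work_struct *work)
{
	/* advisory only -- the result may be stale immediately */
	if (workqueue_congested(raw_smp_processor_id(), wq))
		pr_debug("wq congested, queueing anyway\n");
	queue_work(wq, work);
}
#endif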
3151
3152 /**
3153 * work_cpu - return the last known associated cpu for @work
3154 * @work: the work of interest
3155 *
3156 * RETURNS:
3157 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
3158 */
3159 unsigned int work_cpu(struct work_struct *work)
3160 {
3161 struct global_cwq *gcwq = get_work_gcwq(work);
3162
3163 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3164 }
3165 EXPORT_SYMBOL_GPL(work_cpu);
3166
3167 /**
3168 * work_busy - test whether a work is currently pending or running
3169 * @work: the work to be tested
3170 *
3171 * Test whether @work is currently pending or running. There is no
3172 * synchronization around this function and the test result is
3173 * unreliable and only useful as advisory hints or for debugging.
3174 * Especially for reentrant wqs, the pending state might hide the
3175 * running state.
3176 *
3177 * RETURNS:
3178 * OR'd bitmask of WORK_BUSY_* bits.
3179 */
3180 unsigned int work_busy(struct work_struct *work)
3181 {
3182 struct global_cwq *gcwq = get_work_gcwq(work);
3183 unsigned long flags;
3184 unsigned int ret = 0;
3185
3186 if (!gcwq)
3187 return 0;
3188
3189 spin_lock_irqsave(&gcwq->lock, flags);
3190
3191 if (work_pending(work))
3192 ret |= WORK_BUSY_PENDING;
3193 if (find_worker_executing_work(gcwq, work))
3194 ret |= WORK_BUSY_RUNNING;
3195
3196 spin_unlock_irqrestore(&gcwq->lock, flags);
3197
3198 return ret;
3199 }
3200 EXPORT_SYMBOL_GPL(work_busy);
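
/*
 * Illustrative sketch, not part of the original file: decoding the
 * work_busy() bitmask. Hypothetical names, compiled out.
 */
#if 0
static void example_report(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	/* advisory only -- the state may change as soon as it's read */
	pr_info("work %p:%s%s\n", work,
		busy & WORK_BUSY_PENDING ? " pending" : "",
		busy & WORK_BUSY_RUNNING ? " running" : "");
}
#endif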
3201
3202 /*
3203 * CPU hotplug.
3204 *
3205 * There are two challenges in supporting CPU hotplug. Firstly, there
3206 * are a lot of assumptions on strong associations among work, cwq and
3207 * gcwq which make migrating pending and scheduled works very
3208 * difficult to implement without impacting hot paths. Secondly,
3209 * gcwqs serve a mix of short, long and very long running works, making
3210 * blocked draining impractical.
3211 *
3212 * This is solved by allowing a gcwq to be detached from its CPU, running
3213 * it with unbound (rogue) workers and allowing it to be reattached
3214 * later if the cpu comes back online. A separate thread is created
3215 * to govern a gcwq in such state and is called the trustee of the
3216 * gcwq.
3217 *
3218 * Trustee states and their descriptions.
3219 *
3220 * START Command state used on startup. On CPU_DOWN_PREPARE, a
3221 * new trustee is started with this state.
3222 *
3223 * IN_CHARGE Once started, trustee will enter this state after
3224 * assuming the manager role and making all existing
3225 * workers rogue. DOWN_PREPARE waits for trustee to
3226 * enter this state. After reaching IN_CHARGE, trustee
3227 * tries to execute the pending worklist until it's empty
3228 * and the state is set to BUTCHER, or the state is set
3229 * to RELEASE.
3230 *
3231 * BUTCHER Command state which is set by the cpu callback after
3232 * the cpu has gone down. Once this state is set, the trustee
3233 * knows that there will be no new works on the worklist
3234 * and once the worklist is empty it can proceed to
3235 * killing idle workers.
3236 *
3237 * RELEASE Command state which is set by the cpu callback if the
3238 * cpu down has been canceled or it has come online
3239 * again. After recognizing this state, trustee stops
3240 * trying to drain or butcher and clears ROGUE, rebinds
3241 * all remaining workers back to the cpu and releases
3242 * manager role.
3243 *
3244 * DONE Trustee will enter this state after BUTCHER or RELEASE
3245 * is complete.
3246 *
3247 * trustee CPU draining
3248 * took over down complete
3249 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3250 * | | ^
3251 * | CPU is back online v return workers |
3252 * ----------------> RELEASE --------------
3253 */
3254
3255 /**
3256 * trustee_wait_event_timeout - timed event wait for trustee
3257 * @cond: condition to wait for
3258 * @timeout: timeout in jiffies
3259 *
3260 * wait_event_timeout() for trustee to use. Handles locking and
3261 * checks for RELEASE request.
3262 *
3263 * CONTEXT:
3264 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3265 * multiple times. To be used by trustee.
3266 *
3267 * RETURNS:
3268 * Positive indicating left time if @cond is satisfied, 0 if timed
3269 * out, -1 if canceled.
3270 */
3271 #define trustee_wait_event_timeout(cond, timeout) ({ \
3272 long __ret = (timeout); \
3273 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
3274 __ret) { \
3275 spin_unlock_irq(&gcwq->lock); \
3276 __wait_event_timeout(gcwq->trustee_wait, (cond) || \
3277 (gcwq->trustee_state == TRUSTEE_RELEASE), \
3278 __ret); \
3279 spin_lock_irq(&gcwq->lock); \
3280 } \
3281 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
3282 })
3283
3284 /**
3285 * trustee_wait_event - event wait for trustee
3286 * @cond: condition to wait for
3287 *
3288 * wait_event() for trustee to use. Automatically handles locking and
3289 * checks for CANCEL request.
3290 *
3291 * CONTEXT:
3292 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3293 * multiple times. To be used by trustee.
3294 *
3295 * RETURNS:
3296 * 0 if @cond is satisfied, -1 if canceled.
3297 */
3298 #define trustee_wait_event(cond) ({ \
3299 long __ret1; \
3300 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3301 __ret1 < 0 ? -1 : 0; \
3302 })
3303
3304 static int __cpuinit trustee_thread(void *__gcwq)
3305 {
3306 struct global_cwq *gcwq = __gcwq;
3307 struct worker *worker;
3308 struct work_struct *work;
3309 struct hlist_node *pos;
3310 long rc;
3311 int i;
3312
3313 BUG_ON(gcwq->cpu != smp_processor_id());
3314
3315 spin_lock_irq(&gcwq->lock);
3316 /*
3317 * Claim the manager position and make all workers rogue.
3318 * Trustee must be bound to the target cpu and can't be
3319 * cancelled.
3320 */
3321 BUG_ON(gcwq->cpu != smp_processor_id());
3322 rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3323 BUG_ON(rc < 0);
3324
3325 gcwq->flags |= GCWQ_MANAGING_WORKERS;
3326
3327 list_for_each_entry(worker, &gcwq->idle_list, entry)
3328 worker->flags |= WORKER_ROGUE;
3329
3330 for_each_busy_worker(worker, i, pos, gcwq)
3331 worker->flags |= WORKER_ROGUE;
3332
3333 /*
3334 * Call schedule() so that we cross rq->lock and thus can
3335 * guarantee sched callbacks see the rogue flag. This is
3336 * necessary as scheduler callbacks may be invoked from other
3337 * cpus.
3338 */
3339 spin_unlock_irq(&gcwq->lock);
3340 schedule();
3341 spin_lock_irq(&gcwq->lock);
3342
3343 /*
3344 * Sched callbacks are disabled now. Zap nr_running. After
3345 * this, nr_running stays zero and need_more_worker() and
3346 * keep_working() are always true as long as the worklist is
3347 * not empty.
3348 */
3349 atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3350
3351 spin_unlock_irq(&gcwq->lock);
3352 del_timer_sync(&gcwq->idle_timer);
3353 spin_lock_irq(&gcwq->lock);
3354
3355 /*
3356 * We're now in charge. Notify and proceed to drain. We need
3357 * to keep the gcwq running during the whole CPU down
3358 * procedure as other cpu hotunplug callbacks may need to
3359 * flush currently running tasks.
3360 */
3361 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3362 wake_up_all(&gcwq->trustee_wait);
3363
3364 /*
3365 * The original cpu is in the process of dying and may go away
3366 * anytime now. When that happens, we and all workers would
3367 * be migrated to other cpus. Try draining any remaining work. We
3368 * want to get it over with ASAP - spam rescuers, wake up as
3369 * many idlers as necessary and create new ones till the
3370 * worklist is empty. Note that if the gcwq is frozen, there
3371 * may be frozen works in freezable cwqs. Don't declare
3372 * completion while frozen.
3373 */
3374 while (gcwq->nr_workers != gcwq->nr_idle ||
3375 gcwq->flags & GCWQ_FREEZING ||
3376 gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3377 int nr_works = 0;
3378
3379 list_for_each_entry(work, &gcwq->worklist, entry) {
3380 send_mayday(work);
3381 nr_works++;
3382 }
3383
3384 list_for_each_entry(worker, &gcwq->idle_list, entry) {
3385 if (!nr_works--)
3386 break;
3387 wake_up_process(worker->task);
3388 }
3389
3390 if (need_to_create_worker(gcwq)) {
3391 spin_unlock_irq(&gcwq->lock);
3392 worker = create_worker(gcwq, false);
3393 spin_lock_irq(&gcwq->lock);
3394 if (worker) {
3395 worker->flags |= WORKER_ROGUE;
3396 start_worker(worker);
3397 }
3398 }
3399
3400 /* give a breather */
3401 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3402 break;
3403 }
3404
3405 /*
3406 * Either all works have been scheduled and cpu is down, or
3407 * cpu down has already been canceled. Wait for and butcher
3408 * all workers till we're canceled.
3409 */
3410 do {
3411 rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3412 while (!list_empty(&gcwq->idle_list))
3413 destroy_worker(list_first_entry(&gcwq->idle_list,
3414 struct worker, entry));
3415 } while (gcwq->nr_workers && rc >= 0);
3416
3417 /*
3418 * At this point, either draining has completed and no worker
3419 * is left, or cpu down has been canceled or the cpu is being
3420 * brought back up. There shouldn't be any idle one left.
3421 * Tell the remaining busy ones to rebind once they finish their
3422 * currently scheduled works by scheduling the rebind_work.
3423 */
3424 WARN_ON(!list_empty(&gcwq->idle_list));
3425
3426 for_each_busy_worker(worker, i, pos, gcwq) {
3427 struct work_struct *rebind_work = &worker->rebind_work;
3428
3429 /*
3430 * Rebind_work may race with future cpu hotplug
3431 * operations. Use a separate flag to mark that
3432 * rebinding is scheduled.
3433 */
3434 worker->flags |= WORKER_REBIND;
3435 worker->flags &= ~WORKER_ROGUE;
3436
3437 /* queue rebind_work, wq doesn't matter, use the default one */
3438 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3439 work_data_bits(rebind_work)))
3440 continue;
3441
3442 debug_work_activate(rebind_work);
3443 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3444 worker->scheduled.next,
3445 work_color_to_flags(WORK_NO_COLOR));
3446 }
3447
3448 /* relinquish manager role */
3449 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3450
3451 /* notify completion */
3452 gcwq->trustee = NULL;
3453 gcwq->trustee_state = TRUSTEE_DONE;
3454 wake_up_all(&gcwq->trustee_wait);
3455 spin_unlock_irq(&gcwq->lock);
3456 return 0;
3457 }
3458
3459 /**
3460 * wait_trustee_state - wait for trustee to enter the specified state
3461 * @gcwq: gcwq the trustee of interest belongs to
3462 * @state: target state to wait for
3463 *
3464 * Wait for the trustee to reach @state. DONE is already matched.
3465 *
3466 * CONTEXT:
3467 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3468 * multiple times. To be used by cpu_callback.
3469 */
3470 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3471 __releases(&gcwq->lock)
3472 __acquires(&gcwq->lock)
3473 {
3474 if (!(gcwq->trustee_state == state ||
3475 gcwq->trustee_state == TRUSTEE_DONE)) {
3476 spin_unlock_irq(&gcwq->lock);
3477 __wait_event(gcwq->trustee_wait,
3478 gcwq->trustee_state == state ||
3479 gcwq->trustee_state == TRUSTEE_DONE);
3480 spin_lock_irq(&gcwq->lock);
3481 }
3482 }
3483
3484 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3485 unsigned long action,
3486 void *hcpu)
3487 {
3488 unsigned int cpu = (unsigned long)hcpu;
3489 struct global_cwq *gcwq = get_gcwq(cpu);
3490 struct task_struct *new_trustee = NULL;
3491 struct worker *uninitialized_var(new_worker);
3492 unsigned long flags;
3493
3494 action &= ~CPU_TASKS_FROZEN;
3495
3496 switch (action) {
3497 case CPU_DOWN_PREPARE:
3498 new_trustee = kthread_create(trustee_thread, gcwq,
3499 "workqueue_trustee/%d\n", cpu);
3500 if (IS_ERR(new_trustee))
3501 return notifier_from_errno(PTR_ERR(new_trustee));
3502 kthread_bind(new_trustee, cpu);
3503 /* fall through */
3504 case CPU_UP_PREPARE:
3505 BUG_ON(gcwq->first_idle);
3506 new_worker = create_worker(gcwq, false);
3507 if (!new_worker) {
3508 if (new_trustee)
3509 kthread_stop(new_trustee);
3510 return NOTIFY_BAD;
3511 }
3512 }
3513
3514 /* some are called w/ irq disabled, don't disturb irq status */
3515 spin_lock_irqsave(&gcwq->lock, flags);
3516
3517 switch (action) {
3518 case CPU_DOWN_PREPARE:
3519 /* initialize trustee and tell it to acquire the gcwq */
3520 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3521 gcwq->trustee = new_trustee;
3522 gcwq->trustee_state = TRUSTEE_START;
3523 wake_up_process(gcwq->trustee);
3524 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3525 /* fall through */
3526 case CPU_UP_PREPARE:
3527 BUG_ON(gcwq->first_idle);
3528 gcwq->first_idle = new_worker;
3529 break;
3530
3531 case CPU_DYING:
3532 /*
3533 * Before this, the trustee and all workers except for
3534 * the ones which are still executing works from
3535 * before the last CPU down must be on the cpu. After
3536 * this, they'll all be diasporas.
3537 */
3538 gcwq->flags |= GCWQ_DISASSOCIATED;
3539 break;
3540
3541 case CPU_POST_DEAD:
3542 gcwq->trustee_state = TRUSTEE_BUTCHER;
3543 /* fall through */
3544 case CPU_UP_CANCELED:
3545 destroy_worker(gcwq->first_idle);
3546 gcwq->first_idle = NULL;
3547 break;
3548
3549 case CPU_DOWN_FAILED:
3550 case CPU_ONLINE:
3551 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3552 if (gcwq->trustee_state != TRUSTEE_DONE) {
3553 gcwq->trustee_state = TRUSTEE_RELEASE;
3554 wake_up_process(gcwq->trustee);
3555 wait_trustee_state(gcwq, TRUSTEE_DONE);
3556 }
3557
3558 /*
3559 * Trustee is done and there might be no worker left.
3560 * Put the first_idle in and request a real manager to
3561 * take a look.
3562 */
3563 spin_unlock_irq(&gcwq->lock);
3564 kthread_bind(gcwq->first_idle->task, cpu);
3565 spin_lock_irq(&gcwq->lock);
3566 gcwq->flags |= GCWQ_MANAGE_WORKERS;
3567 start_worker(gcwq->first_idle);
3568 gcwq->first_idle = NULL;
3569 break;
3570 }
3571
3572 spin_unlock_irqrestore(&gcwq->lock, flags);
3573
3574 return notifier_from_errno(0);
3575 }
3576
3577 #ifdef CONFIG_SMP
3578
3579 struct work_for_cpu {
3580 struct completion completion;
3581 long (*fn)(void *);
3582 void *arg;
3583 long ret;
3584 };
3585
3586 static int do_work_for_cpu(void *_wfc)
3587 {
3588 struct work_for_cpu *wfc = _wfc;
3589 wfc->ret = wfc->fn(wfc->arg);
3590 complete(&wfc->completion);
3591 return 0;
3592 }
3593
3594 /**
3595 * work_on_cpu - run a function in user context on a particular cpu
3596 * @cpu: the cpu to run on
3597 * @fn: the function to run
3598 * @arg: the function arg
3599 *
3600 * This will return the value @fn returns.
3601 * It is up to the caller to ensure that the cpu doesn't go offline.
3602 * The caller must not hold any locks which would prevent @fn from completing.
3603 */
3604 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3605 {
3606 struct task_struct *sub_thread;
3607 struct work_for_cpu wfc = {
3608 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3609 .fn = fn,
3610 .arg = arg,
3611 };
3612
3613 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3614 if (IS_ERR(sub_thread))
3615 return PTR_ERR(sub_thread);
3616 kthread_bind(sub_thread, cpu);
3617 wake_up_process(sub_thread);
3618 wait_for_completion(&wfc.completion);
3619 return wfc.ret;
3620 }
3621 EXPORT_SYMBOL_GPL(work_on_cpu);
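
/*
 * Illustrative sketch, not part of the original file: reading a
 * cpu-local value with work_on_cpu(). Hypothetical names, compiled
 * out.
 */
#if 0
static long read_local_fn(void *arg)
{
	/* runs with user context on the requested CPU */
	return raw_smp_processor_id();
}

static long example_read_on_cpu(unsigned int cpu)
{
	/* caller must keep @cpu online, e.g. via get_online_cpus() */
	return work_on_cpu(cpu, read_local_fn, NULL);
}
#endif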
3622 #endif /* CONFIG_SMP */
3623
3624 #ifdef CONFIG_FREEZER
3625
3626 /**
3627 * freeze_workqueues_begin - begin freezing workqueues
3628 *
3629 * Start freezing workqueues. After this function returns, all freezable
3630 * workqueues will queue new works to their cwq->delayed_works list
3631 * instead of gcwq->worklist.
3632 *
3633 * CONTEXT:
3634 * Grabs and releases workqueue_lock and gcwq->lock's.
3635 */
3636 void freeze_workqueues_begin(void)
3637 {
3638 unsigned int cpu;
3639
3640 spin_lock(&workqueue_lock);
3641
3642 BUG_ON(workqueue_freezing);
3643 workqueue_freezing = true;
3644
3645 for_each_gcwq_cpu(cpu) {
3646 struct global_cwq *gcwq = get_gcwq(cpu);
3647 struct workqueue_struct *wq;
3648
3649 spin_lock_irq(&gcwq->lock);
3650
3651 BUG_ON(gcwq->flags & GCWQ_FREEZING);
3652 gcwq->flags |= GCWQ_FREEZING;
3653
3654 list_for_each_entry(wq, &workqueues, list) {
3655 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3656
3657 if (cwq && wq->flags & WQ_FREEZABLE)
3658 cwq->max_active = 0;
3659 }
3660
3661 spin_unlock_irq(&gcwq->lock);
3662 }
3663
3664 spin_unlock(&workqueue_lock);
3665 }
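/*
 * Illustrative sketch (editorial example; example_wq and example_init
 * are hypothetical): works queued on a WQ_FREEZABLE workqueue after
 * freeze_workqueues_begin() pile up on cwq->delayed_works, because
 * max_active was forced to 0 above, and only start executing again
 * after thaw_workqueues().
 */
#if 0
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_freezable", WQ_FREEZABLE, 0);
	return example_wq ? 0 : -ENOMEM;
}
#endif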
3666
3667 /**
3668 * freeze_workqueues_busy - are freezable workqueues still busy?
3669 *
3670 * Check whether freezing is complete. This function must be called
3671 * between freeze_workqueues_begin() and thaw_workqueues().
3672 *
3673 * CONTEXT:
3674 * Grabs and releases workqueue_lock.
3675 *
3676 * RETURNS:
3677 * %true if some freezable workqueues are still busy. %false if freezing
3678 * is complete.
3679 */
3680 bool freeze_workqueues_busy(void)
3681 {
3682 unsigned int cpu;
3683 bool busy = false;
3684
3685 spin_lock(&workqueue_lock);
3686
3687 BUG_ON(!workqueue_freezing);
3688
3689 for_each_gcwq_cpu(cpu) {
3690 struct workqueue_struct *wq;
3691 /*
3692 * nr_active is monotonically decreasing once freezing has
3693 * begun, so it's safe to peek at it without gcwq->lock.
3694 */
3695 list_for_each_entry(wq, &workqueues, list) {
3696 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3697
3698 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3699 continue;
3700
3701 BUG_ON(cwq->nr_active < 0);
3702 if (cwq->nr_active) {
3703 busy = true;
3704 goto out_unlock;
3705 }
3706 }
3707 }
3708 out_unlock:
3709 spin_unlock(&workqueue_lock);
3710 return busy;
3711 }
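/*
 * Illustrative caller pattern (editorial sketch; the real caller is
 * the PM freezer, and this simplified loop assumes linux/delay.h for
 * msleep()):
 */
#if 0
static int example_freeze_workqueues(void)
{
	unsigned long deadline = jiffies + 20 * HZ;	/* arbitrary budget */

	freeze_workqueues_begin();
	while (freeze_workqueues_busy()) {
		if (time_after(jiffies, deadline)) {
			thaw_workqueues();	/* roll back on timeout */
			return -EBUSY;
		}
		msleep(10);
	}
	return 0;	/* all freezable workqueues are quiescent */
}
#endif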
3712
3713 /**
3714 * thaw_workqueues - thaw workqueues
3715 *
3716 * Thaw workqueues. Normal queueing is restored and all collected
3717 * frozen works are transferred to their respective gcwq worklists.
3718 *
3719 * CONTEXT:
3720 * Grabs and releases workqueue_lock and each gcwq's lock.
3721 */
3722 void thaw_workqueues(void)
3723 {
3724 unsigned int cpu;
3725
3726 spin_lock(&workqueue_lock);
3727
3728 if (!workqueue_freezing)
3729 goto out_unlock;
3730
3731 for_each_gcwq_cpu(cpu) {
3732 struct global_cwq *gcwq = get_gcwq(cpu);
3733 struct workqueue_struct *wq;
3734
3735 spin_lock_irq(&gcwq->lock);
3736
3737 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3738 gcwq->flags &= ~GCWQ_FREEZING;
3739
3740 list_for_each_entry(wq, &workqueues, list) {
3741 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3742
3743 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3744 continue;
3745
3746 /* restore max_active and repopulate worklist */
3747 cwq->max_active = wq->saved_max_active;
3748
3749 while (!list_empty(&cwq->delayed_works) &&
3750 cwq->nr_active < cwq->max_active)
3751 cwq_activate_first_delayed(cwq);
3752 }
3753
3754 wake_up_worker(gcwq);
3755
3756 spin_unlock_irq(&gcwq->lock);
3757 }
3758
3759 workqueue_freezing = false;
3760 out_unlock:
3761 spin_unlock(&workqueue_lock);
3762 }
3763 #endif /* CONFIG_FREEZER */
3764
3765 static int __init init_workqueues(void)
3766 {
3767 unsigned int cpu;
3768 int i;
3769
3770 cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3771
3772 /* initialize gcwqs */
3773 for_each_gcwq_cpu(cpu) {
3774 struct global_cwq *gcwq = get_gcwq(cpu);
3775
3776 spin_lock_init(&gcwq->lock);
3777 INIT_LIST_HEAD(&gcwq->worklist);
3778 gcwq->cpu = cpu;
3779 gcwq->flags |= GCWQ_DISASSOCIATED;
3780
3781 INIT_LIST_HEAD(&gcwq->idle_list);
3782 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3783 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3784
3785 init_timer_deferrable(&gcwq->idle_timer);
3786 gcwq->idle_timer.function = idle_worker_timeout;
3787 gcwq->idle_timer.data = (unsigned long)gcwq;
3788
3789 setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3790 (unsigned long)gcwq);
3791
3792 ida_init(&gcwq->worker_ida);
3793
3794 gcwq->trustee_state = TRUSTEE_DONE;
3795 init_waitqueue_head(&gcwq->trustee_wait);
3796 }
3797
3798 /* create the initial worker */
3799 for_each_online_gcwq_cpu(cpu) {
3800 struct global_cwq *gcwq = get_gcwq(cpu);
3801 struct worker *worker;
3802
3803 if (cpu != WORK_CPU_UNBOUND)
3804 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3805 worker = create_worker(gcwq, true);
3806 BUG_ON(!worker);
3807 spin_lock_irq(&gcwq->lock);
3808 start_worker(worker);
3809 spin_unlock_irq(&gcwq->lock);
3810 }
3811
3812 system_wq = alloc_workqueue("events", 0, 0);
3813 system_long_wq = alloc_workqueue("events_long", 0, 0);
3814 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3815 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3816 WQ_UNBOUND_MAX_ACTIVE);
3817 system_freezable_wq = alloc_workqueue("events_freezable",
3818 WQ_FREEZABLE, 0);
3819 BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3820 !system_unbound_wq || !system_freezable_wq);
3821 return 0;
3822 }
3823 early_initcall(init_workqueues);
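/*
 * Illustrative sketch (editorial example; example_work_fn and
 * example_queue are hypothetical): once init_workqueues() has run,
 * kernel code may queue work on the system workqueues created above.
 */
#if 0
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}
static DECLARE_WORK(example_work, example_work_fn);

static void example_queue(void)
{
	/* system_wq is what schedule_work() uses under the hood */
	queue_work(system_wq, &example_work);
}
#endif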