/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <trace/workqueue.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

DEFINE_TRACE(workqueue_insertion);

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
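
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file; "my_wq", "struct my_dev" and my_irq_work_fn() are hypothetical):
 * a driver defines a work item with INIT_WORK() and queues it, e.g. from
 * an interrupt handler, so the heavy lifting runs later in process context.
 *
 *	static void my_irq_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, irq_work);
 *		// sleepable processing of dev goes here
 *	}
 *
 *	INIT_WORK(&dev->irq_work, my_irq_work_fn);
 *	...
 *	queue_work(my_wq, &dev->irq_work);	// 0 means it was already pending
 */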

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
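
/*
 * Illustrative sketch (editor's addition; "my_wq", my_timeout_fn() and
 * my_dwork are hypothetical): arming a delayed work so it runs about one
 * second from now, either on any CPU or pinned to a particular one.
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *	...
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	// any CPU
 *	queue_delayed_work_on(2, my_wq, &my_dwork, HZ);	// CPU 2 only
 */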
1da177e4 266
e1d8aa9f
FW
267DEFINE_TRACE(workqueue_execution);
268
858119e1 269static void run_workqueue(struct cpu_workqueue_struct *cwq)
1da177e4 270{
f293ea92 271 spin_lock_irq(&cwq->lock);
1da177e4
LT
272 cwq->run_depth++;
273 if (cwq->run_depth > 3) {
274 /* morton gets to eat his hat */
275 printk("%s: recursion depth exceeded: %d\n",
af1f16d0 276 __func__, cwq->run_depth);
1da177e4
LT
277 dump_stack();
278 }
279 while (!list_empty(&cwq->worklist)) {
280 struct work_struct *work = list_entry(cwq->worklist.next,
281 struct work_struct, entry);
6bb49e59 282 work_func_t f = work->func;
4e6045f1
JB
283#ifdef CONFIG_LOCKDEP
284 /*
285 * It is permissible to free the struct work_struct
286 * from inside the function that is called from it,
287 * this we need to take into account for lockdep too.
288 * To avoid bogus "held lock freed" warnings as well
289 * as problems when looking into work->lockdep_map,
290 * make a copy and use that here.
291 */
292 struct lockdep_map lockdep_map = work->lockdep_map;
293#endif
e1d8aa9f 294 trace_workqueue_execution(cwq->thread, work);
b89deed3 295 cwq->current_work = work;
1da177e4 296 list_del_init(cwq->worklist.next);
f293ea92 297 spin_unlock_irq(&cwq->lock);
1da177e4 298
365970a1 299 BUG_ON(get_wq_data(work) != cwq);
23b2e599 300 work_clear_pending(work);
3295f0ef
IM
301 lock_map_acquire(&cwq->wq->lockdep_map);
302 lock_map_acquire(&lockdep_map);
65f27f38 303 f(work);
3295f0ef
IM
304 lock_map_release(&lockdep_map);
305 lock_map_release(&cwq->wq->lockdep_map);
1da177e4 306
d5abe669
PZ
307 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
308 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
309 "%s/0x%08x/%d\n",
310 current->comm, preempt_count(),
ba25f9dc 311 task_pid_nr(current));
d5abe669
PZ
312 printk(KERN_ERR " last function: ");
313 print_symbol("%s\n", (unsigned long)f);
314 debug_show_held_locks(current);
315 dump_stack();
316 }
317
f293ea92 318 spin_lock_irq(&cwq->lock);
b89deed3 319 cwq->current_work = NULL;
1da177e4
LT
320 }
321 cwq->run_depth--;
f293ea92 322 spin_unlock_irq(&cwq->lock);
1da177e4
LT
323}
324
325static int worker_thread(void *__cwq)
326{
327 struct cpu_workqueue_struct *cwq = __cwq;
3af24433 328 DEFINE_WAIT(wait);
1da177e4 329
83144186
RW
330 if (cwq->wq->freezeable)
331 set_freezable();
1da177e4
LT
332
333 set_user_nice(current, -5);
1da177e4 334
3af24433 335 for (;;) {
3af24433 336 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
14441960
ON
337 if (!freezing(current) &&
338 !kthread_should_stop() &&
339 list_empty(&cwq->worklist))
1da177e4 340 schedule();
3af24433
ON
341 finish_wait(&cwq->more_work, &wait);
342
85f4186a
ON
343 try_to_freeze();
344
14441960 345 if (kthread_should_stop())
3af24433 346 break;
1da177e4 347
3af24433 348 run_workqueue(cwq);
1da177e4 349 }
3af24433 350
1da177e4
LT
351 return 0;
352}
353
fc2e4d70
ON
354struct wq_barrier {
355 struct work_struct work;
356 struct completion done;
357};
358
359static void wq_barrier_func(struct work_struct *work)
360{
361 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
362 complete(&barr->done);
363}
364
83c22520 365static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1a4d9b0a 366 struct wq_barrier *barr, struct list_head *head)
fc2e4d70
ON
367{
368 INIT_WORK(&barr->work, wq_barrier_func);
369 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
370
371 init_completion(&barr->done);
83c22520 372
1a4d9b0a 373 insert_work(cwq, &barr->work, head);
fc2e4d70
ON
374}
375
14441960 376static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
1da177e4 377{
14441960
ON
378 int active;
379
1da177e4
LT
380 if (cwq->thread == current) {
381 /*
382 * Probably keventd trying to flush its own queue. So simply run
383 * it by hand rather than deadlocking.
384 */
385 run_workqueue(cwq);
14441960 386 active = 1;
1da177e4 387 } else {
fc2e4d70 388 struct wq_barrier barr;
1da177e4 389
14441960 390 active = 0;
83c22520
ON
391 spin_lock_irq(&cwq->lock);
392 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
1a4d9b0a 393 insert_wq_barrier(cwq, &barr, &cwq->worklist);
83c22520
ON
394 active = 1;
395 }
396 spin_unlock_irq(&cwq->lock);
1da177e4 397
d721304d 398 if (active)
83c22520 399 wait_for_completion(&barr.done);
1da177e4 400 }
14441960
ON
401
402 return active;
1da177e4
LT
403}
404
0fcb78c2 405/**
1da177e4 406 * flush_workqueue - ensure that any scheduled work has run to completion.
0fcb78c2 407 * @wq: workqueue to flush
1da177e4
LT
408 *
409 * Forces execution of the workqueue and blocks until its completion.
410 * This is typically used in driver shutdown handlers.
411 *
fc2e4d70
ON
412 * We sleep until all works which were queued on entry have been handled,
413 * but we are not livelocked by new incoming ones.
1da177e4
LT
414 *
415 * This function used to run the workqueues itself. Now we just wait for the
416 * helper threads to do it.
417 */
7ad5b3a5 418void flush_workqueue(struct workqueue_struct *wq)
1da177e4 419{
e7577c50 420 const struct cpumask *cpu_map = wq_cpu_map(wq);
cce1a165 421 int cpu;
1da177e4 422
b1f4ec17 423 might_sleep();
3295f0ef
IM
424 lock_map_acquire(&wq->lockdep_map);
425 lock_map_release(&wq->lockdep_map);
363ab6f1 426 for_each_cpu_mask_nr(cpu, *cpu_map)
b1f4ec17 427 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
1da177e4 428}
ae90dd5d 429EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
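
/*
 * Illustrative sketch (editor's addition; disable_my_irq(), "dev" and
 * dev->irq_work are hypothetical): once new submissions have been stopped,
 * flush_work() waits for just this work item instead of the whole queue.
 *
 *	disable_my_irq(dev);		// nothing requeues dev->irq_work now
 *	flush_work(&dev->irq_work);	// returns 0 if it had already finished
 */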

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
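
/*
 * Illustrative teardown sketch (editor's addition; "dev", dev->poll_dwork
 * and dev->irq_work are hypothetical): in a driver remove path, the timer
 * and any queued or running callback are stopped before the object is freed.
 *
 *	cancel_delayed_work_sync(&dev->poll_dwork);	// timer + work
 *	cancel_work_sync(&dev->irq_work);		// plain work item
 *	kfree(dev);					// now safe to free
 */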

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
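
/*
 * Illustrative sketch (editor's addition; my_poll_fn(), my_poll_dwork and
 * my_poll_hardware() are hypothetical): a classic polling pattern where the
 * handler rearms itself on the kernel-global workqueue. Such a work item
 * must eventually be stopped with cancel_delayed_work_sync().
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll_fn);
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		my_poll_hardware();
 *		schedule_delayed_work(&my_poll_dwork, HZ);	// again in ~1s
 *	}
 */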

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
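
/*
 * Illustrative sketch (editor's addition; "struct my_obj", obj->ew and
 * my_release_fn() are hypothetical): a release path that may be entered
 * from interrupt context. The callback runs inline when possible and is
 * otherwise deferred to keventd, so @ew is embedded in the object itself.
 *
 *	static void my_release_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *	...
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */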

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

DEFINE_TRACE(workqueue_creation);

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
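
/*
 * Illustrative sketch (editor's addition; "my_wq" is hypothetical): callers
 * normally go through the create_workqueue()/create_singlethread_workqueue()
 * wrappers in <linux/workqueue.h>, which supply the name and lockdep key for
 * this call site, and tear the queue down with destroy_workqueue().
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);	// runs remaining work, then frees
 */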

DEFINE_TRACE(workqueue_destruction);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}

#ifdef CONFIG_SMP
static struct workqueue_struct *work_on_cpu_wq __read_mostly;

struct work_for_cpu {
	struct work_struct work;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static void do_work_for_cpu(struct work_struct *w)
{
	struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);

	wfc->ret = wfc->fn(wfc->arg);
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct work_for_cpu wfc;

	INIT_WORK(&wfc.work, do_work_for_cpu);
	wfc.fn = fn;
	wfc.arg = arg;
	queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
	flush_work(&wfc.work);

	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
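
/*
 * Illustrative sketch (editor's addition; my_read_reg(), my_read_percpu_register()
 * and MY_REG are hypothetical): running a function on a specific CPU and
 * collecting its return value. The caller keeps the CPU online, e.g. via
 * get_online_cpus().
 *
 *	static long my_read_reg(void *arg)
 *	{
 *		return my_read_percpu_register((unsigned long)arg);
 *	}
 *	...
 *	ret = work_on_cpu(cpu, my_read_reg, (void *)(unsigned long)MY_REG);
 */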

void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
#ifdef CONFIG_SMP
	work_on_cpu_wq = create_workqueue("work_on_cpu");
	BUG_ON(!work_on_cpu_wq);
#endif
}