workqueue: Add system wide power_efficient workqueues
include/linux/workqueue.h  [deliverable/linux.git]
1da177e4
LT
1/*
2 * workqueue.h --- work queue handling for Linux.
3 */
4
5#ifndef _LINUX_WORKQUEUE_H
6#define _LINUX_WORKQUEUE_H
7
8#include <linux/timer.h>
9#include <linux/linkage.h>
10#include <linux/bitops.h>
4e6045f1 11#include <linux/lockdep.h>
7a22ad75 12#include <linux/threads.h>
60063497 13#include <linux/atomic.h>
7a4e344c 14#include <linux/cpumask.h>
1da177e4
LT
15
16struct workqueue_struct;
17
65f27f38
DH
18struct work_struct;
19typedef void (*work_func_t)(struct work_struct *work);
d8e794df 20void delayed_work_timer_fn(unsigned long __data);
6bb49e59 21
a08727ba
LT
22/*
23 * The first word is the work queue pointer and the flags rolled into
24 * one
25 */
26#define work_data_bits(work) ((unsigned long *)(&(work)->data))
27
22df02bb
TH
28enum {
29 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
8a2e8e5d 30 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
112202d9 31 WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
8a2e8e5d 32 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
22df02bb 33#ifdef CONFIG_DEBUG_OBJECTS_WORK
8a2e8e5d
TH
34 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
35 WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
0f900049 36#else
8a2e8e5d 37 WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
22df02bb
TH
38#endif
39
73f53c4a
TH
40 WORK_STRUCT_COLOR_BITS = 4,
41
22df02bb 42 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
8a2e8e5d 43 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
112202d9 44 WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
affee4b2 45 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
22df02bb
TH
46#ifdef CONFIG_DEBUG_OBJECTS_WORK
47 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
48#else
49 WORK_STRUCT_STATIC = 0,
50#endif
51
73f53c4a
TH
52 /*
 53 * The last color is reserved as "no color", used for work items which
 54 * don't participate in workqueue flushing.
55 */
56 WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
57 WORK_NO_COLOR = WORK_NR_COLORS,
58
bdbc5dd7 59 /* special cpu IDs */
f3421797 60 WORK_CPU_UNBOUND = NR_CPUS,
6be19588 61 WORK_CPU_END = NR_CPUS + 1,
bdbc5dd7 62
73f53c4a 63 /*
112202d9
TH
 64 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
65 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
66 * flush colors.
73f53c4a
TH
67 */
68 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
69 WORK_STRUCT_COLOR_BITS,
70
112202d9 71 /* data contains off-queue information when !WORK_STRUCT_PWQ */
45d9550a 72 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
bbb68dfa
TH
73
74 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
75
715b06b8
TH
76 /*
77 * When a work item is off queue, its high bits point to the last
7c3eed5c
TH
78 * pool it was on. Cap at 31 bits and use the highest number to
79 * indicate that no pool is associated.
715b06b8 80 */
bbb68dfa 81 WORK_OFFQ_FLAG_BITS = 1,
7c3eed5c
TH
82 WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
83 WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
84 WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
85 WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
b5490077
TH
86
87 /* convenience constants */
0f900049 88 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
22df02bb 89 WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
7c3eed5c 90 WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
dcd989cb
TH
91
92 /* bit mask for work_busy() return values */
93 WORK_BUSY_PENDING = 1 << 0,
94 WORK_BUSY_RUNNING = 1 << 1,
3d1cb205
TH
95
96 /* maximum string length for set_worker_desc() */
97 WORKER_DESC_LEN = 24,
22df02bb
TH
98};
99
1da177e4 100struct work_struct {
a08727ba 101 atomic_long_t data;
1da177e4 102 struct list_head entry;
6bb49e59 103 work_func_t func;
4e6045f1
JB
104#ifdef CONFIG_LOCKDEP
105 struct lockdep_map lockdep_map;
106#endif
52bad64d
DH
107};
108
7c3eed5c 109#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
7a22ad75 110#define WORK_DATA_STATIC_INIT() \
7c3eed5c 111 ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
a08727ba 112
52bad64d
DH
113struct delayed_work {
114 struct work_struct work;
1da177e4 115 struct timer_list timer;
60c057bc
LJ
116
117 /* target workqueue and CPU ->timer uses to queue ->work */
118 struct workqueue_struct *wq;
1265057f 119 int cpu;
1da177e4
LT
120};
121
7a4e344c
TH
122/*
123 * A struct for workqueue attributes. This can be used to change
124 * attributes of an unbound workqueue.
d55262c4
TH
125 *
126 * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
 127 * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
128 * participate in pool hash calculations or equality comparisons.
7a4e344c
TH
129 */
130struct workqueue_attrs {
131 int nice; /* nice level */
132 cpumask_var_t cpumask; /* allowed CPUs */
d55262c4 133 bool no_numa; /* disable NUMA affinity */
7a4e344c
TH
134};
135
bf6aede7
JD
136static inline struct delayed_work *to_delayed_work(struct work_struct *work)
137{
138 return container_of(work, struct delayed_work, work);
139}
140
1fa44eca
JB
141struct execute_work {
142 struct work_struct work;
143};
144
4e6045f1
JB
145#ifdef CONFIG_LOCKDEP
146/*
147 * NB: because we have to copy the lockdep_map, setting _key
148 * here is required, otherwise it could get initialised to the
149 * copy of the lockdep_map!
150 */
151#define __WORK_INIT_LOCKDEP_MAP(n, k) \
152 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
153#else
154#define __WORK_INIT_LOCKDEP_MAP(n, k)
155#endif
156
ee64e7f6
TH
157#define __WORK_INITIALIZER(n, f) { \
158 .data = WORK_DATA_STATIC_INIT(), \
159 .entry = { &(n).entry, &(n).entry }, \
160 .func = (f), \
161 __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
65f27f38
DH
162 }
163
f991b318 164#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
ee64e7f6 165 .work = __WORK_INITIALIZER((n).work, (f)), \
f991b318 166 .timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \
e0aecdd8
TH
167 0, (unsigned long)&(n), \
168 (tflags) | TIMER_IRQSAFE), \
dd6414b5
PC
169 }
170
ee64e7f6 171#define DECLARE_WORK(n, f) \
65f27f38
DH
172 struct work_struct n = __WORK_INITIALIZER(n, f)
173
ee64e7f6 174#define DECLARE_DELAYED_WORK(n, f) \
f991b318 175 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
65f27f38 176
203b42f7 177#define DECLARE_DEFERRABLE_WORK(n, f) \
f991b318 178 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
dd6414b5 179
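/*
 * Illustrative example (not part of this header): declaring a statically
 * initialized work item.  The names my_work_fn and my_work are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * The item can later be submitted with schedule_work(&my_work) or
 * queue_work(some_wq, &my_work).
 */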
1da177e4 180/*
65f27f38 181 * initialize a work item's function pointer
1da177e4 182 */
ee64e7f6
TH
183#define PREPARE_WORK(_work, _func) \
184 do { \
185 (_work)->func = (_func); \
1da177e4
LT
186 } while (0)
187
ee64e7f6 188#define PREPARE_DELAYED_WORK(_work, _func) \
65f27f38 189 PREPARE_WORK(&(_work)->work, (_func))
52bad64d 190
dc186ad7
TG
191#ifdef CONFIG_DEBUG_OBJECTS_WORK
192extern void __init_work(struct work_struct *work, int onstack);
193extern void destroy_work_on_stack(struct work_struct *work);
4690c4ab
TH
194static inline unsigned int work_static(struct work_struct *work)
195{
22df02bb 196 return *work_data_bits(work) & WORK_STRUCT_STATIC;
4690c4ab 197}
dc186ad7
TG
198#else
199static inline void __init_work(struct work_struct *work, int onstack) { }
200static inline void destroy_work_on_stack(struct work_struct *work) { }
4690c4ab 201static inline unsigned int work_static(struct work_struct *work) { return 0; }
dc186ad7
TG
202#endif
203
1da177e4 204/*
52bad64d 205 * initialize all of a work item in one go
a08727ba 206 *
b9049df5 207 * NOTE! No point in using "atomic_long_set()": using a direct
a08727ba
LT
208 * assignment of the work data initializer allows the compiler
209 * to generate better code.
1da177e4 210 */
4e6045f1 211#ifdef CONFIG_LOCKDEP
dc186ad7 212#define __INIT_WORK(_work, _func, _onstack) \
65f27f38 213 do { \
4e6045f1
JB
214 static struct lock_class_key __key; \
215 \
dc186ad7 216 __init_work((_work), _onstack); \
23b2e599 217 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
ee64e7f6 218 lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
65f27f38
DH
219 INIT_LIST_HEAD(&(_work)->entry); \
220 PREPARE_WORK((_work), (_func)); \
221 } while (0)
4e6045f1 222#else
dc186ad7 223#define __INIT_WORK(_work, _func, _onstack) \
4e6045f1 224 do { \
dc186ad7 225 __init_work((_work), _onstack); \
4e6045f1
JB
226 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
227 INIT_LIST_HEAD(&(_work)->entry); \
228 PREPARE_WORK((_work), (_func)); \
229 } while (0)
230#endif
65f27f38 231
ee64e7f6
TH
232#define INIT_WORK(_work, _func) \
233 do { \
234 __INIT_WORK((_work), (_func), 0); \
dc186ad7
TG
235 } while (0)
236
ee64e7f6
TH
237#define INIT_WORK_ONSTACK(_work, _func) \
238 do { \
239 __INIT_WORK((_work), (_func), 1); \
dc186ad7
TG
240 } while (0)
241
f991b318 242#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
ee64e7f6
TH
243 do { \
244 INIT_WORK(&(_work)->work, (_func)); \
f991b318 245 __setup_timer(&(_work)->timer, delayed_work_timer_fn, \
e0aecdd8
TH
246 (unsigned long)(_work), \
247 (_tflags) | TIMER_IRQSAFE); \
52bad64d
DH
248 } while (0)
249
f991b318 250#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
ee64e7f6
TH
251 do { \
252 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
f991b318
TH
253 __setup_timer_on_stack(&(_work)->timer, \
254 delayed_work_timer_fn, \
255 (unsigned long)(_work), \
e0aecdd8 256 (_tflags) | TIMER_IRQSAFE); \
6d612b0f
PZ
257 } while (0)
258
f991b318
TH
259#define INIT_DELAYED_WORK(_work, _func) \
260 __INIT_DELAYED_WORK(_work, _func, 0)
261
262#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
263 __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
264
203b42f7 265#define INIT_DEFERRABLE_WORK(_work, _func) \
f991b318
TH
266 __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
267
268#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
269 __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
28287033 270
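/*
 * Illustrative example (not part of this header): initializing an embedded
 * delayed work item at runtime, e.g. from a driver probe path.  The names
 * "struct my_dev" and my_dev_timeout_fn are hypothetical.
 *
 *	struct my_dev {
 *		struct delayed_work timeout_work;
 *	};
 *
 *	INIT_DELAYED_WORK(&dev->timeout_work, my_dev_timeout_fn);
 *	queue_delayed_work(system_wq, &dev->timeout_work,
 *			   msecs_to_jiffies(100));
 */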
365970a1
DH
271/**
272 * work_pending - Find out whether a work item is currently pending
273 * @work: The work item in question
274 */
275#define work_pending(work) \
22df02bb 276 test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
365970a1
DH
277
278/**
279 * delayed_work_pending - Find out whether a delayable work item is currently
280 * pending
 281 * @w: The delayable work item in question
282 */
0221872a
LT
283#define delayed_work_pending(w) \
284 work_pending(&(w)->work)
365970a1 285
65f27f38 286/**
23b2e599
ON
287 * work_clear_pending - for internal use only, mark a work item as not pending
288 * @work: The work item in question
65f27f38 289 */
23b2e599 290#define work_clear_pending(work) \
22df02bb 291 clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
65f27f38 292
c54fce6e
TH
293/*
294 * Workqueue flags and constants. For details, please refer to
295 * Documentation/workqueue.txt.
296 */
97e37d7b 297enum {
bdbc5dd7 298 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
c7fc77f7 299 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
58a69cb4 300 WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
6370a6ad 301 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
649027d7 302 WQ_HIGHPRI = 1 << 4, /* high priority */
fb0e7beb 303 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
226223ab 304 WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
b71ab8c2 305
cee22a15
VK
306 /*
307 * Per-cpu workqueues are generally preferred because they tend to
308 * show better performance thanks to cache locality. Per-cpu
309 * workqueues exclude the scheduler from choosing the CPU to
310 * execute the worker threads, which has an unfortunate side effect
311 * of increasing power consumption.
312 *
313 * The scheduler considers a CPU idle if it doesn't have any task
314 * to execute and tries to keep idle cores idle to conserve power;
315 * however, for example, a per-cpu work item scheduled from an
316 * interrupt handler on an idle CPU will force the scheduler to
 317 * execute the work item on that CPU, breaking the idleness, which in
318 * turn may lead to more scheduling choices which are sub-optimal
319 * in terms of power consumption.
320 *
321 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
322 * but become unbound if workqueue.power_efficient kernel param is
 323 * specified. Per-cpu workqueues which are identified as contributing
 324 * significantly to power consumption are marked with this flag, and
 325 * enabling power_efficient mode leads to noticeable power savings at
 326 * the cost of a small performance penalty (an illustrative allocation
 327 * sketch follows this enum).
328 *
329 * http://thread.gmane.org/gmane.linux.kernel/1480396
330 */
331 WQ_POWER_EFFICIENT = 1 << 7,
332
618b01eb 333 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
8719dcea 334 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
e41e704b 335
b71ab8c2 336 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
f3421797 337 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
b71ab8c2 338 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
97e37d7b 339};
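/*
 * Illustrative example (not part of this header): allocating a workqueue
 * that opts into power-efficient placement.  "my_wq" is hypothetical; with
 * workqueue.power_efficient disabled this behaves like an ordinary per-cpu
 * workqueue.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_POWER_EFFICIENT, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 */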
52bad64d 340
f3421797
TH
341/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
342#define WQ_UNBOUND_MAX_ACTIVE \
343 max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
65f27f38 344
d320c038
TH
345/*
346 * System-wide workqueues which are always present.
347 *
348 * system_wq is the one used by schedule[_delayed]_work[_on]().
349 * Multi-CPU multi-threaded. There are users which expect relatively
350 * short queue flush time. Don't queue works which can run for too
351 * long.
352 *
353 * system_long_wq is similar to system_wq but may host long running
354 * works. Queue flushing might take relatively long.
355 *
f3421797
TH
 356 * system_unbound_wq is an unbound workqueue. Workers are not bound to
357 * any specific CPU, not concurrency managed, and all queued works are
358 * executed immediately as long as max_active limit is not reached and
359 * resources are available.
4149efb2 360 *
24d51add
TH
361 * system_freezable_wq is equivalent to system_wq except that it's
362 * freezable.
0668106c
VK
363 *
 364 * *_power_efficient_wq are inclined towards saving power and are converted
 365 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 366 * they are the same as their non-power-efficient counterparts - e.g.
367 * system_power_efficient_wq is identical to system_wq if
368 * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
d320c038
TH
369 */
370extern struct workqueue_struct *system_wq;
371extern struct workqueue_struct *system_long_wq;
f3421797 372extern struct workqueue_struct *system_unbound_wq;
24d51add 373extern struct workqueue_struct *system_freezable_wq;
0668106c
VK
374extern struct workqueue_struct *system_power_efficient_wq;
375extern struct workqueue_struct *system_freezable_power_efficient_wq;
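/*
 * Illustrative example (not part of this header): queueing to one of the
 * system workqueues declared above.  "my_work" is hypothetical.
 *
 *	queue_work(system_power_efficient_wq, &my_work);
 *
 * behaves like queue_work(system_wq, &my_work) unless
 * workqueue.power_efficient is enabled, in which case the work item is
 * queued on the unbound variant and may run on any allowed CPU.
 */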
ae930e0f 376
3b07e9ca 377static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
ae930e0f
TH
378{
379 return system_wq;
380}
381
3b07e9ca 382static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
ae930e0f
TH
383{
384 return system_freezable_wq;
385}
386
 387/* equivalent to system_wq and system_freezable_wq, deprecated */
388#define system_nrt_wq __system_nrt_wq()
389#define system_nrt_freezable_wq __system_nrt_freezable_wq()
52bad64d 390
4e6045f1 391extern struct workqueue_struct *
b196be89
TH
392__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
393 struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
4e6045f1 394
b196be89
TH
395/**
396 * alloc_workqueue - allocate a workqueue
397 * @fmt: printf format for the name of the workqueue
398 * @flags: WQ_* flags
399 * @max_active: max in-flight work items, 0 for default
400 * @args: args for @fmt
401 *
402 * Allocate a workqueue with the specified parameters. For detailed
403 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
404 *
 405 * The __lock_name macro dance is to guarantee that a single lock_class_key
 406 * doesn't end up with different names, which isn't allowed by lockdep.
407 *
408 * RETURNS:
409 * Pointer to the allocated workqueue on success, %NULL on failure.
410 */
4e6045f1 411#ifdef CONFIG_LOCKDEP
ee64e7f6
TH
412#define alloc_workqueue(fmt, flags, max_active, args...) \
413({ \
414 static struct lock_class_key __key; \
415 const char *__lock_name; \
416 \
417 if (__builtin_constant_p(fmt)) \
418 __lock_name = (fmt); \
419 else \
420 __lock_name = #fmt; \
421 \
422 __alloc_workqueue_key((fmt), (flags), (max_active), \
423 &__key, __lock_name, ##args); \
4e6045f1
JB
424})
425#else
ee64e7f6
TH
426#define alloc_workqueue(fmt, flags, max_active, args...) \
427 __alloc_workqueue_key((fmt), (flags), (max_active), \
b196be89 428 NULL, NULL, ##args)
4e6045f1
JB
429#endif
430
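/*
 * Illustrative example (not part of this header): a typical
 * alloc_workqueue() call for a driver that must make progress under memory
 * pressure.  "mydrv" is hypothetical and 0 selects the default max_active.
 *
 *	wq = alloc_workqueue("mydrv", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!wq)
 *		return -ENOMEM;
 */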
81dcaf65
TH
431/**
432 * alloc_ordered_workqueue - allocate an ordered workqueue
b196be89 433 * @fmt: printf format for the name of the workqueue
58a69cb4 434 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
b196be89 435 * @args: args for @fmt
81dcaf65
TH
436 *
 437 * Allocate an ordered workqueue. An ordered workqueue executes at
 438 * most one work item at any given time, in queueing order. It is
 439 * implemented as an unbound workqueue with @max_active of one.
440 *
441 * RETURNS:
442 * Pointer to the allocated workqueue on success, %NULL on failure.
443 */
ee64e7f6 444#define alloc_ordered_workqueue(fmt, flags, args...) \
8719dcea 445 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
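/*
 * Illustrative example (not part of this header): an ordered workqueue for
 * work items that must not run concurrently or out of order.
 * "my_ordered_wq" is hypothetical.
 *
 *	my_ordered_wq = alloc_ordered_workqueue("my_ordered_wq",
 *						WQ_MEM_RECLAIM);
 */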
81dcaf65 446
ee64e7f6 447#define create_workqueue(name) \
6370a6ad 448 alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
ee64e7f6 449#define create_freezable_workqueue(name) \
58a69cb4 450 alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
ee64e7f6 451#define create_singlethread_workqueue(name) \
6370a6ad 452 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
1da177e4
LT
453
454extern void destroy_workqueue(struct workqueue_struct *wq);
455
7a4e344c
TH
456struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
457void free_workqueue_attrs(struct workqueue_attrs *attrs);
9e8cd2f5
TH
458int apply_workqueue_attrs(struct workqueue_struct *wq,
459 const struct workqueue_attrs *attrs);
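/*
 * Illustrative example (not part of this header): restricting an unbound
 * workqueue to CPU 0 and raising its priority via workqueue_attrs.
 * "my_unbound_wq" is hypothetical and error handling is abbreviated.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of(0));
 *		apply_workqueue_attrs(my_unbound_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */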
7a4e344c 460
d4283e93 461extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
c1a220e7 462 struct work_struct *work);
d4283e93 463extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
28e53bdd 464 struct delayed_work *work, unsigned long delay);
8376fe22
TH
465extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
466 struct delayed_work *dwork, unsigned long delay);
28e53bdd 467
b3c97528 468extern void flush_workqueue(struct workqueue_struct *wq);
9c5a2ba7 469extern void drain_workqueue(struct workqueue_struct *wq);
28e53bdd 470extern void flush_scheduled_work(void);
1da177e4 471
65f27f38 472extern int schedule_on_each_cpu(work_func_t func);
1da177e4 473
65f27f38 474int execute_in_process_context(work_func_t fn, struct execute_work *);
1da177e4 475
401a8d04
TH
476extern bool flush_work(struct work_struct *work);
477extern bool cancel_work_sync(struct work_struct *work);
478
479extern bool flush_delayed_work(struct delayed_work *dwork);
57b30ae7 480extern bool cancel_delayed_work(struct delayed_work *dwork);
401a8d04 481extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
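/*
 * Illustrative example (not part of this header): typical teardown on a
 * driver remove path.  cancel_work_sync() also waits for a running
 * instance; cancel_delayed_work_sync() stops the timer first.  "my_work"
 * and "my_dwork" are hypothetical.
 *
 *	cancel_work_sync(&my_work);
 *	cancel_delayed_work_sync(&my_dwork);
 */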
28e53bdd 482
dcd989cb
TH
483extern void workqueue_set_max_active(struct workqueue_struct *wq,
484 int max_active);
e6267616 485extern bool current_is_workqueue_rescuer(void);
d84ff051 486extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
dcd989cb 487extern unsigned int work_busy(struct work_struct *work);
3d1cb205
TH
488extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
489extern void print_worker_info(const char *log_lvl, struct task_struct *task);
dcd989cb 490
8425e3d5
TH
491/**
492 * queue_work - queue work on a workqueue
493 * @wq: workqueue to use
494 * @work: work to queue
495 *
496 * Returns %false if @work was already on a queue, %true otherwise.
497 *
498 * We queue the work to the CPU on which it was submitted, but if the CPU dies
499 * it can be processed by another CPU.
500 */
501static inline bool queue_work(struct workqueue_struct *wq,
502 struct work_struct *work)
503{
504 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
505}
506
507/**
508 * queue_delayed_work - queue work on a workqueue after delay
509 * @wq: workqueue to use
510 * @dwork: delayable work to queue
511 * @delay: number of jiffies to wait before queueing
512 *
513 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
514 */
515static inline bool queue_delayed_work(struct workqueue_struct *wq,
516 struct delayed_work *dwork,
517 unsigned long delay)
518{
519 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
520}
521
522/**
523 * mod_delayed_work - modify delay of or queue a delayed work
524 * @wq: workqueue to use
525 * @dwork: work to queue
526 * @delay: number of jiffies to wait before queueing
527 *
528 * mod_delayed_work_on() on local CPU.
529 */
530static inline bool mod_delayed_work(struct workqueue_struct *wq,
531 struct delayed_work *dwork,
532 unsigned long delay)
533{
534 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
535}
536
537/**
538 * schedule_work_on - put work task on a specific cpu
539 * @cpu: cpu to put the work task on
540 * @work: job to be done
541 *
542 * This puts a job on a specific cpu
543 */
544static inline bool schedule_work_on(int cpu, struct work_struct *work)
545{
546 return queue_work_on(cpu, system_wq, work);
547}
548
549/**
550 * schedule_work - put work task in global workqueue
551 * @work: job to be done
552 *
553 * Returns %false if @work was already on the kernel-global workqueue and
554 * %true otherwise.
555 *
556 * This puts a job in the kernel-global workqueue if it was not already
557 * queued and leaves it in the same position on the kernel-global
558 * workqueue otherwise.
559 */
560static inline bool schedule_work(struct work_struct *work)
561{
562 return queue_work(system_wq, work);
563}
564
565/**
566 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
567 * @cpu: cpu to use
568 * @dwork: job to be done
569 * @delay: number of jiffies to wait
570 *
571 * After waiting for a given time this puts a job in the kernel-global
572 * workqueue on the specified CPU.
573 */
574static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
575 unsigned long delay)
576{
577 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
578}
579
580/**
581 * schedule_delayed_work - put work task in global workqueue after delay
582 * @dwork: job to be done
583 * @delay: number of jiffies to wait or 0 for immediate execution
584 *
585 * After waiting for a given time this puts a job in the kernel-global
586 * workqueue.
587 */
588static inline bool schedule_delayed_work(struct delayed_work *dwork,
589 unsigned long delay)
590{
591 return queue_delayed_work(system_wq, dwork, delay);
592}
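/*
 * Illustrative example (not part of this header): deferring work by one
 * second on the system workqueue.  "my_dwork" is hypothetical and assumed
 * to have been set up with INIT_DELAYED_WORK() or DECLARE_DELAYED_WORK().
 *
 *	schedule_delayed_work(&my_dwork, HZ);
 */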
593
594/**
595 * keventd_up - is workqueue initialized yet?
596 */
597static inline bool keventd_up(void)
598{
599 return system_wq != NULL;
600}
601
4e49627b
ON
602/*
 603 * Like above, but uses del_timer() instead of del_timer_sync(). This means
 604 * that if it returns 0, the timer function may still be running and the
 605 * queueing may still be in progress.
606 */
136b5721 607static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
4e49627b 608{
401a8d04 609 bool ret;
4e49627b
ON
610
611 ret = del_timer(&work->timer);
612 if (ret)
613 work_clear_pending(&work->work);
614 return ret;
615}
616
606a5020 617/* used to be different but now identical to flush_work(), deprecated */
43829731 618static inline bool __deprecated flush_work_sync(struct work_struct *work)
606a5020
TH
619{
620 return flush_work(work);
621}
622
623/* used to be different but now identical to flush_delayed_work(), deprecated */
43829731 624static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
606a5020
TH
625{
626 return flush_delayed_work(dwork);
627}
628
2d3854a3 629#ifndef CONFIG_SMP
d84ff051 630static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
2d3854a3
RR
631{
632 return fn(arg);
633}
634#else
d84ff051 635long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
2d3854a3 636#endif /* CONFIG_SMP */
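/*
 * Illustrative example (not part of this header): running a function
 * synchronously on a specific CPU.  read_local_state() and some_counter
 * are hypothetical; work_on_cpu() sleeps until the callback has run.
 *
 *	static long read_local_state(void *arg)
 *	{
 *		return this_cpu_read(some_counter);
 *	}
 *
 *	ret = work_on_cpu(2, read_local_state, NULL);
 */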
a25909a4 637
a0a1a5fd
TH
638#ifdef CONFIG_FREEZER
639extern void freeze_workqueues_begin(void);
640extern bool freeze_workqueues_busy(void);
641extern void thaw_workqueues(void);
642#endif /* CONFIG_FREEZER */
643
226223ab
TH
644#ifdef CONFIG_SYSFS
645int workqueue_sysfs_register(struct workqueue_struct *wq);
646#else /* CONFIG_SYSFS */
647static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
648{ return 0; }
649#endif /* CONFIG_SYSFS */
650
1da177e4 651#endif