workqueue: remove PREPARE_[DELAYED_]WORK()
[deliverable/linux.git] / include / linux / workqueue.h
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color, which is used for works that don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_END		= NR_CPUS + 1,

	/*
	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 *
 * Unlike other fields, ->no_numa isn't a property of a worker_pool.  It
 * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
 * participate in pool hash calculations or equality comparisons.
 */
struct workqueue_attrs {
	int			nice;		/* nice level */
	cpumask_var_t		cpumask;	/* allowed CPUs */
	bool			no_numa;	/* disable NUMA affinity */
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

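/*
 * Example (illustrative sketch, not part of the original header): a delayed
 * work handler is passed the embedded work_struct, so to_delayed_work() plus
 * a second container_of() is the usual way back to the surrounding object.
 * "struct my_device" and its members are hypothetical.
 */
struct my_device {
	struct delayed_work poll_dwork;
	int poll_count;
};

static void my_device_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_device *mydev = container_of(dwork, struct my_device,
					       poll_dwork);

	mydev->poll_count++;	/* runs in process context on a worker */
}
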
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

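/*
 * Example (illustrative sketch): DECLARE_WORK()/DECLARE_DELAYED_WORK() build
 * statically initialized items which only need a handler and can then be
 * queued directly, e.g. with schedule_work() defined later in this header.
 * All names below are hypothetical.
 */
static void my_reset_fn(struct work_struct *work)
{
	/* executed later in process context by a workqueue worker */
}
static DECLARE_WORK(my_reset_work, my_reset_fn);
static DECLARE_DELAYED_WORK(my_retry_dwork, my_reset_fn);

/* e.g. from an interrupt handler: schedule_work(&my_reset_work); */
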
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	do {								\
		__INIT_WORK((_work), (_func), 0);			\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	do {								\
		__INIT_WORK((_work), (_func), 1);			\
	} while (0)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

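/*
 * Example (illustrative sketch): work items embedded in dynamically allocated
 * objects are set up with INIT_WORK()/INIT_DELAYED_WORK() before first use.
 * "struct my_ctx" and the handlers are hypothetical; kzalloc() comes from
 * <linux/slab.h>.
 */
struct my_ctx {
	struct work_struct tx_work;
	struct delayed_work timeout_dwork;
};

static void my_tx_fn(struct work_struct *work) { /* ... */ }
static void my_timeout_fn(struct work_struct *work) { /* ... */ }

static struct my_ctx *my_ctx_create(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	INIT_WORK(&ctx->tx_work, my_tx_fn);
	INIT_DELAYED_WORK(&ctx->timeout_dwork, my_timeout_fn);
	return ctx;
}
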
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

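/*
 * Example (illustrative sketch): delayed_work_pending() reports whether the
 * item is queued or its timer is armed.  Queueing an already pending item is
 * a no-op anyway, so the test below is only a cheap early-out; the names are
 * hypothetical and schedule_delayed_work() is defined later in this header.
 */
static void my_poll_fn(struct work_struct *work)
{
	/* ... */
}
static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll_fn);

static void my_kick_poll(void)
{
	if (!delayed_work_pending(&my_poll_dwork))
		schedule_delayed_work(&my_poll_dwork, HZ);
}
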
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	/*
	 * All wqs are now non-reentrant making the following flag
	 * meaningless.  Will be removed.
	 */
	WQ_NON_REENTRANT	= 1 << 0, /* DEPRECATED */

	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which contribute significantly to
	 * power consumption are identified and marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power
	 * saving at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
	return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
	return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq			__system_nrt_wq()
#define system_nrt_freezable_wq	__system_nrt_freezable_wq()

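/*
 * Example (illustrative sketch): picking a system workqueue.  Short items go
 * to system_wq via schedule_work(); an item that can run for a long time is
 * better queued on system_long_wq (or system_unbound_wq) so it doesn't hold
 * up system_wq flushers.  Names are hypothetical.
 */
static void my_gc_fn(struct work_struct *work)
{
	/* potentially long-running scan */
}
static DECLARE_WORK(my_gc_work, my_gc_fn);

static void my_kick_gc(void)
{
	queue_work(system_long_wq, &my_gc_work);
}
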
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = #fmt#args;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

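/*
 * Example (illustrative sketch): a driver allocating its own workqueue.
 * WQ_MEM_RECLAIM guarantees forward progress under memory pressure and
 * WQ_HIGHPRI uses the high priority worker pool; @max_active of 0 picks the
 * default.  The name and error style are hypothetical; -ENOMEM needs
 * <linux/errno.h>.
 */
static struct workqueue_struct *my_driver_wq;

static int my_driver_init(void)
{
	my_driver_wq = alloc_workqueue("my_driver",
				       WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!my_driver_wq)
		return -ENOMEM;
	return 0;
}
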
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
			1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))

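/*
 * Example (illustrative sketch): an ordered workqueue runs at most one item
 * at a time, in queueing order, which suits state machines whose steps must
 * not be reordered or run concurrently.  Names are hypothetical.
 */
static struct workqueue_struct *my_fsm_wq;

static int my_fsm_init(void)
{
	my_fsm_wq = alloc_ordered_workqueue("my_fsm", WQ_MEM_RECLAIM);
	return my_fsm_wq ? 0 : -ENOMEM;
}
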
extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);

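/*
 * Example (illustrative sketch): a typical teardown order.  Cancel with the
 * _sync() variants first so no handler is still running or about to requeue
 * itself, then destroy the (hypothetical) private workqueue;
 * destroy_workqueue() drains anything still queued.
 */
struct my_driver {
	struct workqueue_struct *wq;
	struct work_struct tx_work;
	struct delayed_work timeout_dwork;
};

static void my_driver_remove(struct my_driver *drv)
{
	cancel_delayed_work_sync(&drv->timeout_dwork);
	cancel_work_sync(&drv->tx_work);
	destroy_workqueue(drv->wq);
}
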
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

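/*
 * Example (illustrative sketch): mod_delayed_work() (re)starts the timer
 * whether or not @dwork was already pending, so it works as a debounce:
 * every event pushes the deadline out again.  Names are hypothetical and
 * msecs_to_jiffies() comes from <linux/jiffies.h>.
 */
struct my_sensor {
	struct workqueue_struct *wq;
	struct delayed_work settle_dwork;
};

static void my_sensor_event(struct my_sensor *sensor)
{
	/* run the settle handler only after 50ms without further events */
	mod_delayed_work(sensor->wq, &sensor->settle_dwork,
			 msecs_to_jiffies(50));
}
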
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

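/*
 * Example (illustrative sketch): a periodic poll that re-arms itself from its
 * own handler with schedule_delayed_work() and is stopped on teardown with
 * cancel_delayed_work_sync().  Names are hypothetical;
 * round_jiffies_relative() comes from <linux/timer.h>.
 */
static void my_periodic_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... do the periodic check ... */

	schedule_delayed_work(dwork, round_jiffies_relative(HZ));
}
static DECLARE_DELAYED_WORK(my_periodic_dwork, my_periodic_fn);
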
/**
 * keventd_up - is workqueue initialized yet?
 */
static inline bool keventd_up(void)
{
	return system_wq != NULL;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
	return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
	return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

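/*
 * Example (illustrative sketch): work_on_cpu() runs @fn synchronously in a
 * workqueue worker bound to @cpu and returns its long result; on !SMP it is
 * a direct call.  The function names are hypothetical.
 */
static long my_read_cpu_state(void *arg)
{
	/* executes on the requested CPU, in process context */
	return 0;
}

static long my_query_cpu(int cpu)
{
	return work_on_cpu(cpu, my_read_cpu_state, NULL);
}
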
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#endif