/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is reserved as "no color" and is used for work
	 * items which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 8 bits off of the cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
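
/*
 * How the flag bits and the cwq pointer share the data word, as an
 * illustrative sketch of what kernel/workqueue.c does internally; this
 * is not a public API and "cwq" here is just a local pointer:
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *
 *	if (data & WORK_STRUCT_CWQ)
 *		cwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */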

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry = { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

#define DECLARE_DEFERRED_WORK(n, f)				\
	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
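
/*
 * Example: a statically initialized work item (illustrative sketch;
 * my_handler and my_work are hypothetical names, not defined here):
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *
 * The handler runs later in process context once the item is queued,
 * e.g. via schedule_work(&my_work).
 */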

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
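
/*
 * Example: run-time initialization of a work item embedded in a larger
 * object (illustrative sketch; struct my_dev and my_dev_timeout are
 * hypothetical):
 *
 *	struct my_dev {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void my_dev_timeout(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, dwork);
 *		...
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->dwork, my_dev_timeout);
 */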

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)					\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is
 * currently pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)					\
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only,
 *			mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work)				\
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
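
/*
 * Example: testing pending state before re-queueing (illustrative
 * sketch; dev->dwork and wq are hypothetical):
 *
 *	if (!delayed_work_pending(&dev->dwork))
 *		queue_delayed_work(wq, &dev->dwork, HZ);
 *
 * Note the test and the queueing are not atomic as a pair; callers that
 * need atomicity should rely on queue_delayed_work()'s own return value,
 * which is zero if the item was already pending.
 */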

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue work items which can run for
 * too long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound
 * to any specific CPU, not concurrency managed, and all queued works
 * are executed immediately as long as the max_active limit is not
 * reached and resources are available.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
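
/*
 * Example: queueing onto the system workqueues (illustrative sketch;
 * my_work and my_dwork are hypothetical, already-initialized items):
 *
 *	schedule_work(&my_work);		queues on system_wq
 *	schedule_delayed_work(&my_dwork, HZ);	runs roughly 1s later
 *	queue_work(system_long_wq, &my_work);	for long-running items
 */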

extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
		      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)		\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__alloc_workqueue_key((name), (flags), (max_active),	\
			      &__key, __lock_name);		\
})
#else
#define alloc_workqueue(name, flags, max_active)		\
	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif
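
/*
 * Example: allocating and tearing down a dedicated workqueue
 * (illustrative sketch; "my_wq" and my_work are hypothetical):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 *	...
 *	destroy_workqueue(wq);
 *
 * A max_active of 0 selects the default (WQ_DFL_ACTIVE).
 */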

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @name: name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, unsigned int flags)
{
	return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
}
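
/*
 * Example: strictly serialized processing (illustrative sketch;
 * "my_ordered" is a hypothetical name):
 *
 *	wq = alloc_ordered_workqueue("my_ordered", WQ_MEM_RECLAIM);
 *
 * Items queued here never run concurrently with one another, which can
 * remove the need for locking on state touched only by this queue's
 * work items.
 */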

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezeable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			      struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
				    unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work
 * callback function may still be running on return from
 * cancel_delayed_work(), unless it returns %true and the work doesn't
 * re-arm itself.  Run flush_workqueue() or cancel_work_sync() to wait
 * on it.
 */
static inline bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
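
/*
 * Example: shutting down a self-rearming delayed work item
 * (illustrative sketch; dev->dwork is hypothetical):
 *
 *	cancel_delayed_work_sync(&dev->dwork);
 *
 * Unlike plain cancel_delayed_work(), the _sync variant also waits for
 * an already-running callback, so it must not be called from the
 * callback itself or while holding a lock the callback takes.
 */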

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This
 * means, if it returns %false the timer function may be running and
 * the queueing is in progress.
 */
static inline bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* Obsolete.  use cancel_delayed_work_sync() */
static inline __deprecated
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete.  use cancel_delayed_work_sync() */
static inline __deprecated
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
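
/*
 * Example: running a function synchronously on a specific CPU
 * (illustrative sketch; read_msr_on_cpu and ctx are hypothetical):
 *
 *	static long read_msr_on_cpu(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	ret = work_on_cpu(2, read_msr_on_cpu, &ctx);
 *
 * work_on_cpu() blocks until the function has run on the given CPU and
 * returns its value, so it must be called from a context that may sleep.
 */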

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif