workqueue: define masks for work flags and conditionalize STATIC flags
include/linux/workqueue.h
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 1,	/* static initializer (debugobjects) */
#endif

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_STRUCT_FLAG_MASK	= 3UL,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
};
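/*
 * Illustrative sketch (not part of this header): the low bits of
 * work->data carry the flags above and the remaining bits carry a
 * workqueue-related pointer, so the masks let the two be separated.
 * The helper below is hypothetical and only demonstrates the masking.
 *
 *	static inline void *hypothetical_work_wq_data(struct work_struct *work)
 *	{
 *		return (void *)(*work_data_bits(work) & WORK_STRUCT_WQ_DATA_MASK);
 *	}
 */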
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
#define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
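/*
 * Example (illustrative only): a delayed work callback receives a
 * struct work_struct pointer; to_delayed_work() recovers the embedding
 * struct delayed_work.  my_dwork_fn is a hypothetical name.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		... use dwork, e.g. re-arm it with schedule_delayed_work() ...
 *	}
 */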
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
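/*
 * Example (illustrative only): declaring a statically initialized work
 * item and queueing it on the shared kernel workqueue.  my_work and
 * my_work_fn are hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work executed\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	...
 *	schedule_work(&my_work);
 */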
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ON_STACK(_work, _func)			\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)
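/*
 * Example (illustrative only): a work item living on the stack must be
 * initialized with INIT_WORK_ON_STACK() and, when debugobjects is
 * enabled, torn down with destroy_work_on_stack() once it is known to
 * have finished.  my_fn is a hypothetical callback.
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ON_STACK(&work, my_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */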
#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
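/*
 * Example (illustrative only): runtime initialization of a delayed work
 * item, then scheduling it to run roughly one second from now.  my_ctx
 * and my_dwork_fn are hypothetical names.
 *
 *	struct delayed_work *dwork = &my_ctx->dwork;
 *
 *	INIT_DELAYED_WORK(dwork, my_dwork_fn);
 *	schedule_delayed_work(dwork, msecs_to_jiffies(1000));
 */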
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

enum {
	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
	WQ_SINGLE_THREAD	= 1 << 1, /* no per-cpu worker */
};
extern struct workqueue_struct *
__create_workqueue_key(const char *name, unsigned int flags,
		       struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, flags)				\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__create_workqueue_key((name), (flags), &__key,		\
			       __lock_name);			\
})
#else
#define __create_workqueue(name, flags)				\
	__create_workqueue_key((name), (flags), NULL, NULL)
#endif

#define create_workqueue(name)					\
	__create_workqueue((name), 0)
#define create_freezeable_workqueue(name)			\
	__create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD)
#define create_singlethread_workqueue(name)			\
	__create_workqueue((name), WQ_SINGLE_THREAD)
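/*
 * Example (illustrative only): typical lifecycle of a private
 * workqueue.  my_wq and my_work are hypothetical names.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */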
extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
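/*
 * Example (illustrative only): queueing delayed work on a private
 * workqueue instead of the shared kernel one.  my_wq and my_dwork are
 * hypothetical names.
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 */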
extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int current_is_keventd(void);
extern int keventd_up(void);
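/*
 * Example (illustrative only): schedule_on_each_cpu() runs the given
 * callback once on every online CPU and waits for all of them to
 * complete.  my_percpu_fn is a hypothetical name.
 *
 *	static void my_percpu_fn(struct work_struct *work)
 *	{
 *		pr_info("running on cpu %d\n", smp_processor_id());
 *	}
 *	...
 *	schedule_on_each_cpu(my_percpu_fn);
 */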
extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);

extern int cancel_work_sync(struct work_struct *work);
/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
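/*
 * Example (illustrative only): shutting down a possibly re-arming
 * delayed work item.  When cancel_delayed_work() returns 0 the callback
 * may still be queued or running, so a sync variant is needed to wait
 * for it.  my_dwork is a hypothetical name.
 *
 *	if (!cancel_delayed_work(&my_dwork))
 *		cancel_delayed_work_sync(&my_dwork);
 */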
/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
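/*
 * Example (illustrative only): work_on_cpu() runs a function
 * synchronously in process context on the chosen CPU and returns its
 * result; on !SMP it simply calls the function.  my_cpu_fn and my_ctx
 * are hypothetical names.
 *
 *	long ret = work_on_cpu(2, my_cpu_fn, &my_ctx);
 */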
#endif