Driver core: internal struct dma_coherent_mem, change type of a member.
[deliverable/linux.git] / include / linux / slow-work.h
1 /* Worker thread pool for slow items, such as filesystem lookups or mkdirs
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 * See Documentation/slow-work.txt
12 */
13
14 #ifndef _LINUX_SLOW_WORK_H
15 #define _LINUX_SLOW_WORK_H
16
17 #ifdef CONFIG_SLOW_WORK
18
19 #include <linux/sysctl.h>
20 #include <linux/timer.h>
21
22 struct slow_work;
23 #ifdef CONFIG_SLOW_WORK_DEBUG
24 struct seq_file;
25 #endif
26
/*
 * The operations used to support slow work items
 * - supplied by the user of the thread pool when a work item is queued
 */
struct slow_work_ops {
	/* the module that provides these operations (pinned while queued) */
	struct module *owner;

	/* get a ref on a work item
	 * - return 0 if successful, -ve if not
	 */
	int (*get_ref)(struct slow_work *work);

	/* discard a ref to a work item */
	void (*put_ref)(struct slow_work *work);

	/* execute a work item */
	void (*execute)(struct slow_work *work);

#ifdef CONFIG_SLOW_WORK_DEBUG
	/* describe a work item for debugfs */
	void (*desc)(struct slow_work *work, struct seq_file *m);
#endif
};
50
/*
 * A slow work item
 * - A reference is held on the parent object by the thread pool when it is
 *   queued
 */
struct slow_work {
	struct module		*owner;	/* the owning module */
	unsigned long		flags;
	/* NB: the following constants are bit *numbers*, not masks (see the
	 * "1 << SLOW_WORK_VERY_SLOW" usage in vslow_work_init()) */
#define SLOW_WORK_PENDING	0	/* item pending (further) execution */
#define SLOW_WORK_EXECUTING	1	/* item currently executing */
#define SLOW_WORK_ENQ_DEFERRED	2	/* item enqueue deferred */
#define SLOW_WORK_VERY_SLOW	3	/* item is very slow */
#define SLOW_WORK_CANCELLING	4	/* item is being cancelled, don't enqueue */
#define SLOW_WORK_DELAYED	5	/* item is struct delayed_slow_work with active timer */
	const struct slow_work_ops *ops; /* operations table for this item */
	struct list_head	link;	/* link in queue */
#ifdef CONFIG_SLOW_WORK_DEBUG
	struct timespec		mark;	/* jiffies at which queued or exec begun */
#endif
};
71
/* A slow work item with a delay before it is queued for execution */
struct delayed_slow_work {
	struct slow_work	work;	/* the embedded work item */
	struct timer_list	timer;	/* timer that defers the enqueue */
};
76
77 /**
78 * slow_work_init - Initialise a slow work item
79 * @work: The work item to initialise
80 * @ops: The operations to use to handle the slow work item
81 *
82 * Initialise a slow work item.
83 */
84 static inline void slow_work_init(struct slow_work *work,
85 const struct slow_work_ops *ops)
86 {
87 work->flags = 0;
88 work->ops = ops;
89 INIT_LIST_HEAD(&work->link);
90 }
91
92 /**
93 * slow_work_init - Initialise a delayed slow work item
94 * @work: The work item to initialise
95 * @ops: The operations to use to handle the slow work item
96 *
97 * Initialise a delayed slow work item.
98 */
99 static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
100 const struct slow_work_ops *ops)
101 {
102 init_timer(&dwork->timer);
103 slow_work_init(&dwork->work, ops);
104 }
105
106 /**
107 * vslow_work_init - Initialise a very slow work item
108 * @work: The work item to initialise
109 * @ops: The operations to use to handle the slow work item
110 *
111 * Initialise a very slow work item. This item will be restricted such that
112 * only a certain number of the pool threads will be able to execute items of
113 * this type.
114 */
115 static inline void vslow_work_init(struct slow_work *work,
116 const struct slow_work_ops *ops)
117 {
118 work->flags = 1 << SLOW_WORK_VERY_SLOW;
119 work->ops = ops;
120 INIT_LIST_HEAD(&work->link);
121 }
122
123 /**
124 * slow_work_is_queued - Determine if a slow work item is on the work queue
125 * work: The work item to test
126 *
127 * Determine if the specified slow-work item is on the work queue. This
128 * returns true if it is actually on the queue.
129 *
130 * If the item is executing and has been marked for requeue when execution
131 * finishes, then false will be returned.
132 *
133 * Anyone wishing to wait for completion of execution can wait on the
134 * SLOW_WORK_EXECUTING bit.
135 */
136 static inline bool slow_work_is_queued(struct slow_work *work)
137 {
138 unsigned long flags = work->flags;
139 return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING);
140 }
141
142 extern int slow_work_enqueue(struct slow_work *work);
143 extern void slow_work_cancel(struct slow_work *work);
144 extern int slow_work_register_user(struct module *owner);
145 extern void slow_work_unregister_user(struct module *owner);
146
147 extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
148 unsigned long delay);
149
150 static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
151 {
152 slow_work_cancel(&dwork->work);
153 }
154
155 extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
156 signed long *_timeout);
157
158 #ifdef CONFIG_SYSCTL
159 extern ctl_table slow_work_sysctls[];
160 #endif
161
162 #endif /* CONFIG_SLOW_WORK */
163 #endif /* _LINUX_SLOW_WORK_H */
This page took 0.036512 seconds and 5 git commands to generate.