/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;
	int max_active;
	int current_max;
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static inline struct __btrfs_workqueue
*__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
			 int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->max_active = max_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_max = max_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		ret->current_max = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->max_active,
						 "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->max_active, "btrfs",
						 name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      int flags,
					      int max_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active, thresh);
	if (unlikely(!ret->normal)) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
						    thresh);
		if (unlikely(!ret->high)) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

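/*
 * Allocation example (illustrative only; the name, flags and values
 * below are hypothetical, not taken from a real caller):
 *
 *	struct btrfs_workqueue *wq;
 *
 *	wq = btrfs_alloc_workqueue("worker", WQ_MEM_RECLAIM | WQ_HIGHPRI,
 *				   8, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 * Passing 0 as thresh selects DFT_THRESHOLD; any nonzero value below
 * DFT_THRESHOLD disables thresholding for this queue.
 */
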
/*
 * Hook for the thresholding mechanism, called from btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called from it.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the thresholding mechanism, called before executing the work.
 * This hook is called in kthread context, so it is safe to call
 * workqueue_set_max_active here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may change later, but that's OK since we don't need
	 * a precise value to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
}

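/*
 * Worked example (illustrative): with thresh == 64 and max_active == 8,
 * current_max starts at 1.  If work is queued faster than it is executed
 * and wq->pending climbs above 64, thresh_exec_hook() steps current_max
 * up toward 8; once pending falls below 32 (thresh / 2) it is stepped
 * back down toward 1, keeping the number of in-flight workers roughly
 * proportional to the backlog.
 */
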
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free function with
		 * the lock held, though.
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work *work;
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_free, since the
	 *    struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, as the work may be freed by
	 *    other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

void btrfs_init_work(struct btrfs_work *work,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, normal_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
	trace_btrfs_work_queued(work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

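/*
 * Usage sketch (illustrative only; my_work_func and ctx are hypothetical
 * names, not from a real caller):
 *
 *	static void my_work_func(struct btrfs_work *work)
 *	{
 *		...
 *	}
 *
 *	btrfs_init_work(&ctx->work, my_work_func, NULL, NULL);
 *	btrfs_queue_work(wq, &ctx->work);
 *
 * Passing an ordered_func guarantees the completion callbacks run in
 * queueing order; ordered_free is then responsible for freeing the
 * work item once everything is done.
 */
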
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
{
	if (!wq)
		return;
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}