fs/btrfs/async-thread.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
        struct workqueue_struct *normal_wq;
        /* List head pointing to ordered work list */
        struct list_head ordered_list;

        /* Spinlock for ordered_list */
        spinlock_t list_lock;

        /* Thresholding-related variables */
        atomic_t pending;

        /* Upper limit of concurrent workers */
        int limit_active;

        /* Current number of concurrent workers */
        int current_active;

        /* Threshold to change current_active */
        int thresh;
        unsigned int count;
        spinlock_t thres_lock;
};

struct btrfs_workqueue {
        struct __btrfs_workqueue *normal;
        struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)                                         \
void btrfs_##name(struct work_struct *arg)                              \
{                                                                       \
        struct btrfs_work *work = container_of(arg, struct btrfs_work, \
                                               normal_work);            \
        normal_work_helper(work);                                       \
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

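/*
 * For illustration (not part of the original source): the macro above
 * expands, for e.g. BTRFS_WORK_HELPER(endio_helper), to:
 *
 *      void btrfs_endio_helper(struct work_struct *arg)
 *      {
 *              struct btrfs_work *work = container_of(arg, struct btrfs_work,
 *                                                     normal_work);
 *              normal_work_helper(work);
 *      }
 *
 * Each helper is a distinct function symbol, so stack traces and work-item
 * debugging can tell the individual btrfs queues apart.
 */
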
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
                        int thresh)
{
        struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->limit_active = limit_active;
        atomic_set(&ret->pending, 0);
        if (thresh == 0)
                thresh = DFT_THRESHOLD;
        /* For a low threshold, disabling thresholding is the better choice */
        if (thresh < DFT_THRESHOLD) {
                ret->current_active = limit_active;
                ret->thresh = NO_THRESHOLD;
        } else {
                /*
                 * For a thresholdable wq, let its concurrency grow on
                 * demand. Use a minimal max_active at alloc time to reduce
                 * resource usage.
                 */
                ret->current_active = 1;
                ret->thresh = thresh;
        }

        if (flags & WQ_HIGHPRI)
                ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
                                                 ret->current_active, "btrfs",
                                                 name);
        else
                ret->normal_wq = alloc_workqueue("%s-%s", flags,
                                                 ret->current_active, "btrfs",
                                                 name);
        if (!ret->normal_wq) {
                kfree(ret);
                return NULL;
        }

        INIT_LIST_HEAD(&ret->ordered_list);
        spin_lock_init(&ret->list_lock);
        spin_lock_init(&ret->thres_lock);
        trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
        return ret;
}

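/*
 * Illustrative examples (not in the original source) of how the thresh
 * argument above is interpreted:
 *
 *      thresh == 0  -> becomes DFT_THRESHOLD (32); thresholding on,
 *                      current_active starts at 1 and grows on demand.
 *      thresh == 8  -> below DFT_THRESHOLD; thresholding disabled
 *                      (NO_THRESHOLD), current_active pinned to
 *                      limit_active.
 *      thresh == 64 -> thresholding on with a 64-item trigger point.
 */
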
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
                                              unsigned int flags,
                                              int limit_active,
                                              int thresh)
{
        struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
                                              limit_active, thresh);
        if (!ret->normal) {
                kfree(ret);
                return NULL;
        }

        if (flags & WQ_HIGHPRI) {
                ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
                                                    thresh);
                if (!ret->high) {
                        __btrfs_destroy_workqueue(ret->normal);
                        kfree(ret);
                        return NULL;
                }
        }
        return ret;
}

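/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 * passing WQ_HIGHPRI allocates both a normal and a high-priority queue,
 * so btrfs_queue_work() can route flagged items to the "-high" variant.
 *
 *      struct btrfs_workqueue *wq;
 *
 *      wq = btrfs_alloc_workqueue("endio", WQ_HIGHPRI, 8, 0);
 *      if (!wq)
 *              return -ENOMEM;
 *      ...
 *      btrfs_destroy_workqueue(wq);
 */
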
/*
 * Hook for the thresholding mechanism, called from btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called from it.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_inc(&wq->pending);
}

/*
 * Hook for the thresholding mechanism, called before executing the work.
 * This hook is called in kthread context, so it is safe to call
 * workqueue_set_max_active here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
        int new_current_active;
        long pending;
        int need_change = 0;

        if (wq->thresh == NO_THRESHOLD)
                return;

        atomic_dec(&wq->pending);
        spin_lock(&wq->thres_lock);
        /*
         * Use wq->count to limit the calling frequency of
         * workqueue_set_max_active.
         */
        wq->count++;
        wq->count %= (wq->thresh / 4);
        if (!wq->count)
                goto out;
        new_current_active = wq->current_active;

        /*
         * pending may change underneath us, but that is fine: it does not
         * need to be exact for computing new_current_active.
         */
        pending = atomic_read(&wq->pending);
        if (pending > wq->thresh)
                new_current_active++;
        if (pending < wq->thresh / 2)
                new_current_active--;
        new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
        if (new_current_active != wq->current_active) {
                need_change = 1;
                wq->current_active = new_current_active;
        }
out:
        spin_unlock(&wq->thres_lock);

        if (need_change) {
                workqueue_set_max_active(wq->normal_wq, wq->current_active);
        }
}

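/*
 * Worked example (illustrative, not from the original source), with
 * thresh = 32 and limit_active = 8:
 *
 *      pending = 40 (> thresh)      -> current_active grows by 1
 *      pending = 12 (< thresh / 2)  -> current_active shrinks by 1
 *      otherwise                    -> current_active is left alone
 *
 * The result is always clamped to [1, limit_active], and
 * workqueue_set_max_active() is only invoked when the value actually
 * changes.
 */
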
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
        struct list_head *list = &wq->ordered_list;
        struct btrfs_work *work;
        spinlock_t *lock = &wq->list_lock;
        unsigned long flags;

        while (1) {
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
                work = list_entry(list->next, struct btrfs_work,
                                  ordered_list);
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;

                /*
                 * we are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
                trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);

                /* now take the lock again and drop our item from the list */
                spin_lock_irqsave(lock, flags);
                list_del(&work->ordered_list);
                spin_unlock_irqrestore(lock, flags);

                /*
                 * we don't want to call the ordered free functions
                 * with the lock held though
                 */
                work->ordered_free(work);
                trace_btrfs_all_work_done(work);
        }
        spin_unlock_irqrestore(lock, flags);
}

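/*
 * Illustrative timeline (not from the original source) of the ordering
 * guarantee above, for three items A, B, C queued in that order whose
 * work->func() completes as C, A, B:
 *
 *      C done -> C stays on ordered_list; A (the head) is not done yet,
 *                so no ordered_func runs.
 *      A done -> A->ordered_func() runs, A is removed; B is not done,
 *                so the loop stops at B.
 *      B done -> B->ordered_func() then C->ordered_func() run, in order.
 *
 * ordered_func() therefore always runs in queue order, regardless of the
 * completion order of the parallel work->func() calls.
 */
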
static void normal_work_helper(struct btrfs_work *work)
{
        struct __btrfs_workqueue *wq;
        int need_order = 0;

        /*
         * We should not touch things inside work in the following cases:
         * 1) after work->func() if it has no ordered_free
         *    Since the struct is freed in work->func().
         * 2) after setting WORK_DONE_BIT
         *    The work may be freed in other threads almost instantly.
         * So we save the needed things here.
         */
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;

        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq);
        }
        if (!need_order)
                trace_btrfs_all_work_done(work);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
                     btrfs_func_t func,
                     btrfs_func_t ordered_func,
                     btrfs_func_t ordered_free)
{
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
        INIT_WORK(&work->normal_work, uniq_func);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
}

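/*
 * A minimal usage sketch (hypothetical caller and callback, not part of
 * this file): pair one of the btrfs_*_helper functions above with the
 * actual work callback, then hand the item to btrfs_queue_work().
 *
 *      static void my_func(struct btrfs_work *work)
 *      {
 *              ... do the actual work ...
 *      }
 *
 *      btrfs_init_work(&item->work, btrfs_endio_helper,
 *                      my_func, NULL, NULL);
 *      btrfs_queue_work(fs_info->endio_workers, &item->work);
 *
 * Passing NULL for ordered_func/ordered_free opts out of the ordered-list
 * machinery entirely.
 */
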
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                                      struct btrfs_work *work)
{
        unsigned long flags;

        work->wq = wq;
        thresh_queue_hook(wq);
        if (work->ordered_func) {
                spin_lock_irqsave(&wq->list_lock, flags);
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        trace_btrfs_work_queued(work);
        queue_work(wq->normal_wq, &work->normal_work);
}

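/*
 * Queue the work item on the high-priority queue when WORK_HIGH_PRIO_BIT
 * is set (see btrfs_set_work_high_priority) and a high queue exists,
 * otherwise on the normal queue.
 */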
void btrfs_queue_work(struct btrfs_workqueue *wq,
                      struct btrfs_work *work)
{
        struct __btrfs_workqueue *dest_wq;

        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
                dest_wq = wq->high;
        else
                dest_wq = wq->normal;
        __btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
        destroy_workqueue(wq->normal_wq);
        trace_btrfs_workqueue_destroy(wq);
        kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
        if (!wq)
                return;
        if (wq->high)
                __btrfs_destroy_workqueue(wq->high);
        __btrfs_destroy_workqueue(wq->normal);
        kfree(wq);
}

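/*
 * Adjust the upper concurrency limit. The new value is not pushed to the
 * underlying kernel workqueue here; thresh_exec_hook() picks it up as the
 * clamp bound the next time it recomputes current_active.
 */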
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
        if (!wq)
                return;
        wq->normal->limit_active = limit_active;
        if (wq->high)
                wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
        set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}