/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

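/*
 * if a queueing call couldn't start a new worker itself (the pool is in
 * atomic start mode), it sets atomic_start_pending.  Notice that flag
 * here, in thread context, and start the extra worker on its behalf.
 */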
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers >= workers->max_workers)
		goto out;

	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_start_workers(workers, 1);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}

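/*
 * for ordered work queues, the "done" functions must run in the order
 * the items were queued.  Each worker marks its item with WORK_DONE_BIT
 * and then walks the order list from the front, calling ordered_func and
 * ordered_free only for items whose predecessors have all completed.
 */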
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}

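/* drop a reference on a worker, freeing the struct once the last ref is gone */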
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

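/*
 * called by a worker that has been idle for a long time: if the pool
 * still has other threads and this one is completely drained, take it
 * off the list and drop its reference.  Returns 1 if the worker was
 * torn down and the calling thread should exit.
 */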
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

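/*
 * grab the next work item for a worker, preferring the high priority
 * queue.  Both pending lists are spliced onto local list heads so a
 * batch can be drained without retaking worker->lock for every item.
 */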
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = 0;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);

		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers >= workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

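/* flag a work item so the queueing functions put it on the priority lists */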
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
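
/*
 * usage sketch for the pool API above (illustrative only, not taken from
 * a real caller; the "demo" name, my_work and my_func are hypothetical,
 * and error handling is omitted):
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		do the deferred work here; unless the pool is ordered,
 *		'work' is typically freed by this function
 *	}
 *
 *	btrfs_init_workers(&pool, "demo", 8);
 *	btrfs_start_workers(&pool, 1);
 *	my_work->func = my_func;
 *	btrfs_queue_worker(&pool, my_work);
 *	...
 *	btrfs_stop_workers(&pool);
 */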