/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_log.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_log_item_t  *prev_lip;

        if (list_empty(&ailp->xa_ail))
                return;

        /*
         * Check the next and previous entries are valid.
         */
        ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
        prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

        prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->xa_ail))
                return NULL;

        return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        if (lip->li_ail.next == &ailp->xa_ail)
                return NULL;

        return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the first
 * item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_min(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_max(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us.  However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it.  Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_add_tail(&cur->list, &ailp->xa_cursors);
}
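
/*
 * Typical traversal using a cursor, sketched from the pattern that
 * xfsaild_push() below follows (illustrative only; error and shutdown
 * handling omitted):
 *
 *      struct xfs_ail_cursor   cur;
 *      struct xfs_log_item     *lip;
 *
 *      spin_lock(&ailp->xa_lock);
 *      lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *      while (lip != NULL) {
 *              ... process lip; the AIL lock may be dropped and
 *                  reacquired here, the cursor keeps the traversal safe ...
 *              lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *      }
 *      xfs_trans_ail_cursor_done(&cur);
 *      spin_unlock(&ailp->xa_lock);
 */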

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit of the item pointer being set),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        struct xfs_log_item     *lip = cur->item;

        if ((__psint_t)lip & 1)
                lip = xfs_ail_min(ailp);
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item.  This is called when an
 * item is removed from the AIL.  Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object.  We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail_cursor   *cur;

        list_for_each_entry(cur, &ailp->xa_cursors, list) {
                if (cur->item == lip)
                        cur->item = (struct xfs_log_item *)
                                        ((__psint_t)cur->item | 1);
        }
}
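
/*
 * Consumers of a cursor must treat the low bit of cur->item as an
 * "invalidated" tag before dereferencing the pointer.  A sketch of the
 * check (this is what xfs_trans_ail_cursor_next() and xfs_ail_splice()
 * already do):
 *
 *      if ((__psint_t)cur->item & 1)
 *              ... the item was removed from the AIL; restart the
 *                  traversal from xfs_ail_min() ...
 */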

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL.  Returns NULL if the list is empty.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        xfs_trans_ail_cursor_init(ailp, cur);

        if (lsn == 0) {
                lip = xfs_ail_min(ailp);
                goto out;
        }

        list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
                        goto out;
        }
        return NULL;

out:
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
        return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
                        return lip;
        }
        return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with
 * an LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_trans_ail_cursor_init(ailp, cur);
        cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
        return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN.  We splice to the
 * tail of the given LSN to maintain insert order for push traversals.  The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct list_head        *list,
        xfs_lsn_t               lsn)
{
        struct xfs_log_item     *lip;

        ASSERT(!list_empty(list));

        /*
         * Use the cursor to determine the insertion point if one is
         * provided.  If not, or if the one we got is not valid,
         * find the place in the AIL where the items belong.
         */
        lip = cur ? cur->item : NULL;
        if (!lip || (__psint_t)lip & 1)
                lip = __xfs_trans_ail_cursor_last(ailp, lsn);

        /*
         * If a cursor is provided, we know we're processing the AIL
         * in lsn order, and future items to be spliced in will
         * follow the last one being inserted now.  Update the
         * cursor to point to that last item, now while we have a
         * reliable pointer to it.
         */
        if (cur)
                cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

        /*
         * Finally perform the splice.  Unless the AIL was empty,
         * lip points to the item in the AIL _after_ which the new
         * items should go.  If lip is null the AIL was empty, so
         * the new items go at the head of the AIL.
         */
        if (lip)
                list_splice(list, &lip->li_ail);
        else
                list_splice(list, &ailp->xa_ail);
}
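
/*
 * For illustration, splicing a list of items at LSN 100 into an AIL that
 * already holds items at LSNs 50, 100 and 150 inserts the new items after
 * the existing LSN 100 item, preserving insert order for push traversals:
 *
 *      before: 50 -> 100 -> 150
 *      after:  50 -> 100 -> (new items @ 100) -> 150
 */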

/*
 * Delete the given item from the AIL, checking (in DEBUG builds) that the
 * list is well formed and invalidating any cursors that point to the item.
 */
static void
xfs_ail_delete(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_ail_check(ailp, lip);
        list_del(&lip->li_ail);
        xfs_trans_ail_cursor_clear(ailp, lip);
}

static long
xfsaild_push(
        struct xfs_ail          *ailp)
{
        xfs_mount_t             *mp = ailp->xa_mount;
        struct xfs_ail_cursor   cur;
        xfs_log_item_t          *lip;
        xfs_lsn_t               lsn;
        xfs_lsn_t               target;
        long                    tout;
        int                     stuck = 0;
        int                     flushing = 0;
        int                     count = 0;

        /*
         * If we encountered pinned items or did not finish writing out all
         * buffers the last time we ran, force the log first and wait for it
         * before pushing again.
         */
        if (ailp->xa_log_flush && ailp->xa_last_pushed_lsn == 0 &&
            (!list_empty_careful(&ailp->xa_buf_list) ||
             xfs_ail_min_lsn(ailp))) {
                ailp->xa_log_flush = 0;

                XFS_STATS_INC(xs_push_ail_flush);
                xfs_log_force(mp, XFS_LOG_SYNC);
        }

        spin_lock(&ailp->xa_lock);

        /* barrier matches the xa_target update in xfs_ail_push() */
        smp_rmb();
        target = ailp->xa_target;
        ailp->xa_target_prev = target;

        lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
        if (!lip) {
                /*
                 * If the AIL is empty or our push has reached the end we are
                 * done now.
                 */
                xfs_trans_ail_cursor_done(&cur);
                spin_unlock(&ailp->xa_lock);
                goto out_done;
        }

        XFS_STATS_INC(xs_push_ail);

        lsn = lip->li_lsn;
        while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
                int     lock_result;

                /*
                 * Note that iop_push may unlock and reacquire the AIL lock.  We
                 * rely on the AIL cursor implementation to be able to deal with
                 * the dropped lock.
                 */
                lock_result = lip->li_ops->iop_push(lip, &ailp->xa_buf_list);
                switch (lock_result) {
                case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(xs_push_ail_success);
                        trace_xfs_ail_push(lip);

                        ailp->xa_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_FLUSHING:
                        /*
                         * The item or its backing buffer is already being
                         * flushed.  The typical reason for that is that an
                         * inode buffer is locked because we already pushed the
                         * updates to it as part of inode clustering.
                         *
                         * We do not want to stop flushing just because lots
                         * of items are already being flushed, but we need to
                         * re-try the flushing relatively soon if most of the
                         * AIL is being flushed.
                         */
                        XFS_STATS_INC(xs_push_ail_flushing);
                        trace_xfs_ail_flushing(lip);

                        flushing++;
                        ailp->xa_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_PINNED:
                        XFS_STATS_INC(xs_push_ail_pinned);
                        trace_xfs_ail_pinned(lip);

                        stuck++;
                        ailp->xa_log_flush++;
                        break;
                case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(xs_push_ail_locked);
                        trace_xfs_ail_locked(lip);

                        stuck++;
                        break;
                default:
                        ASSERT(0);
                        break;
                }

                count++;

                /*
                 * Are there too many items we can't do anything with?
                 *
                 * If we are skipping too many items because we can't flush
                 * them or they are already being flushed, we back off and
                 * give them time to complete whatever operation is being
                 * done.  i.e. remove pressure from the AIL while we can't make
                 * progress so traversals don't slow down further inserts and
                 * removals to/from the AIL.
                 *
                 * The value of 100 is an arbitrary magic number based on
                 * observation.
                 */
                if (stuck > 100)
                        break;

                lip = xfs_trans_ail_cursor_next(ailp, &cur);
                if (lip == NULL)
                        break;
                lsn = lip->li_lsn;
        }
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->xa_lock);

        if (xfs_buf_delwri_submit_nowait(&ailp->xa_buf_list))
                ailp->xa_log_flush++;

        if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
                /*
                 * We reached the target or the AIL is empty, so wait a bit
                 * longer for I/O to complete and remove pushed items from the
                 * AIL before we start the next scan from the start of the AIL.
                 */
                tout = 50;
                ailp->xa_last_pushed_lsn = 0;
        } else if (((stuck + flushing) * 100) / count > 90) {
                /*
                 * Either there is a lot of contention on the AIL or we are
                 * stuck due to operations in progress.  "Stuck" in this case
                 * is defined as >90% of the items we tried to push were stuck.
                 *
                 * Backoff a bit more to allow some I/O to complete before
                 * restarting from the start of the AIL.  This prevents us from
                 * spinning on the same items, and if they are pinned allows
                 * the restart to issue a log force to unpin the stuck items.
                 */
                tout = 20;
                ailp->xa_last_pushed_lsn = 0;
        } else {
                /*
                 * Assume we have more work to do in a short while.
                 */
                tout = 10;
        }

        return tout;
}
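
/*
 * Summary of the timeouts returned above, i.e. how long xfsaild() sleeps
 * before the next push attempt:
 *
 *      50ms - target reached or AIL empty; wait for I/O completion
 *      20ms - >90% of pushed items were stuck or flushing; back off
 *      10ms - progress was made; expect more work shortly
 */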

static int
xfsaild(
        void    *data)
{
        struct xfs_ail  *ailp = data;
        long            tout = 0;       /* milliseconds */

        current->flags |= PF_MEMALLOC;

        while (!kthread_should_stop()) {
                if (tout && tout <= 20)
                        __set_current_state(TASK_KILLABLE);
                else
                        __set_current_state(TASK_INTERRUPTIBLE);

                spin_lock(&ailp->xa_lock);

                /*
                 * Idle if the AIL is empty and we are not racing with a target
                 * update.  We check the AIL after we set the task to a sleep
                 * state to guarantee that we either catch an xa_target update
                 * or that a wake_up resets the state to TASK_RUNNING.
                 * Otherwise, we run the risk of sleeping indefinitely.
                 *
                 * The barrier matches the xa_target update in xfs_ail_push().
                 */
                smp_rmb();
                if (!xfs_ail_min(ailp) &&
                    ailp->xa_target == ailp->xa_target_prev) {
                        spin_unlock(&ailp->xa_lock);
                        schedule();
                        tout = 0;
                        continue;
                }
                spin_unlock(&ailp->xa_lock);

                if (tout)
                        schedule_timeout(msecs_to_jiffies(tout));

                __set_current_state(TASK_RUNNING);

                try_to_freeze();

                tout = xfsaild_push(ailp);
        }

        return 0;
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a workqueue, which means the caller needs
 * to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.  We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
        struct xfs_ail  *ailp,
        xfs_lsn_t       threshold_lsn)
{
        xfs_log_item_t  *lip;

        lip = xfs_ail_min(ailp);
        if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
            XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
                return;

        /*
         * Ensure that the new target is noticed in push code before it clears
         * the XFS_AIL_PUSHING_BIT.
         */
        smp_wmb();
        xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
        smp_wmb();

        wake_up_process(ailp->xa_task);
}

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

        if (threshold_lsn)
                xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
        struct xfs_ail  *ailp)
{
        struct xfs_log_item     *lip;
        DEFINE_WAIT(wait);

        spin_lock(&ailp->xa_lock);
        while ((lip = xfs_ail_max(ailp)) != NULL) {
                prepare_to_wait(&ailp->xa_empty, &wait, TASK_UNINTERRUPTIBLE);
                ailp->xa_target = lip->li_lsn;
                wake_up_process(ailp->xa_task);
                spin_unlock(&ailp->xa_lock);
                schedule();
                spin_lock(&ailp->xa_lock);
        }
        spin_unlock(&ailp->xa_lock);

        finish_wait(&ailp->xa_empty, &wait);
}

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL.  If an item is not in the AIL, it
 * will be added.  Otherwise, it will be repositioned by removing it and
 * re-adding it to the AIL.  If we move the first item in the AIL, update the
 * log tail to match the new minimum LSN in the AIL.
 *
 * This function executes the update operations on all the items in the array
 * under a single hold of the AIL lock rather than cycling the lock per item.
 * Because the items' LSNs may have changed since the caller last looked at
 * them, once we have the AIL lock we need to check each log item LSN to
 * confirm it actually needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL.  This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               lsn) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        int                     mlip_changed = 0;
        int                     i;
        LIST_HEAD(tmp);

        ASSERT(nr_items > 0);           /* Not required, but true. */
        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (lip->li_flags & XFS_LI_IN_AIL) {
                        /* check if we really need to move the item */
                        if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
                                continue;

                        trace_xfs_ail_move(lip, lip->li_lsn, lsn);
                        xfs_ail_delete(ailp, lip);
                        if (mlip == lip)
                                mlip_changed = 1;
                } else {
                        lip->li_flags |= XFS_LI_IN_AIL;
                        trace_xfs_ail_insert(lip, 0, lsn);
                }
                lip->li_lsn = lsn;
                list_add(&lip->li_ail, &tmp);
        }

        if (!list_empty(&tmp))
                xfs_ail_splice(ailp, cur, &tmp, lsn);

        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
                        xlog_assign_tail_lsn_locked(ailp->xa_mount);
                spin_unlock(&ailp->xa_lock);

                xfs_log_space_wake(ailp->xa_mount);
        } else {
                spin_unlock(&ailp->xa_lock);
        }
}
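
/*
 * A minimal caller sketch (illustrative values only; transaction commit
 * completion is the typical real caller):
 *
 *      spin_lock(&ailp->xa_lock);
 *      xfs_trans_ail_update_bulk(ailp, NULL, log_items, nr_items,
 *                                commit_lsn);
 *      ... the AIL lock has been dropped for us on return ...
 */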

/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL.  The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion.  This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0.  If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL.  This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        int                     shutdown_type) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        int                     mlip_changed = 0;
        int                     i;

        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (!(lip->li_flags & XFS_LI_IN_AIL)) {
                        struct xfs_mount        *mp = ailp->xa_mount;

                        spin_unlock(&ailp->xa_lock);
                        if (!XFS_FORCED_SHUTDOWN(mp)) {
                                xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
                "%s: attempting to delete a log item that is not in the AIL",
                                                __func__);
                                xfs_force_shutdown(mp, shutdown_type);
                        }
                        return;
                }

                trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
                xfs_ail_delete(ailp, lip);
                lip->li_flags &= ~XFS_LI_IN_AIL;
                lip->li_lsn = 0;
                if (mlip == lip)
                        mlip_changed = 1;
        }

        if (mlip_changed) {
                if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
                        xlog_assign_tail_lsn_locked(ailp->xa_mount);
                if (list_empty(&ailp->xa_ail))
                        wake_up_all(&ailp->xa_empty);
                spin_unlock(&ailp->xa_lock);

                xfs_log_space_wake(ailp->xa_mount);
        } else {
                spin_unlock(&ailp->xa_lock);
        }
}
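
/*
 * Single item deletion usually goes through a wrapper; a sketch of the
 * calling convention (assuming a single-item helper along the lines of
 * xfs_trans_ail_delete() in xfs_trans_priv.h):
 *
 *      spin_lock(&ailp->xa_lock);
 *      xfs_trans_ail_delete_bulk(ailp, &lip, 1, SHUTDOWN_CORRUPT_INCORE);
 *      ... the AIL lock has been dropped for us on return ...
 */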

int
xfs_trans_ail_init(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp;

        ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
        if (!ailp)
                return -ENOMEM;

        ailp->xa_mount = mp;
        INIT_LIST_HEAD(&ailp->xa_ail);
        INIT_LIST_HEAD(&ailp->xa_cursors);
        spin_lock_init(&ailp->xa_lock);
        INIT_LIST_HEAD(&ailp->xa_buf_list);
        init_waitqueue_head(&ailp->xa_empty);

        ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
                        ailp->xa_mount->m_fsname);
        if (IS_ERR(ailp->xa_task))
                goto out_free_ailp;

        mp->m_ail = ailp;
        return 0;

out_free_ailp:
        kmem_free(ailp);
        return -ENOMEM;
}

void
xfs_trans_ail_destroy(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp = mp->m_ail;

        kthread_stop(ailp->xa_task);
        kmem_free(ailp);
}