xfs: always push the AIL to the target
fs/xfs/xfs_trans_ail.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"

struct workqueue_struct	*xfs_ail_wq;	/* AIL workqueue */

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */
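
/*
 * Background on the ordering used throughout this file: AIL items are kept
 * sorted by log sequence number (LSN). An xfs_lsn_t packs the log cycle
 * number into its upper 32 bits and a block number into its lower 32 bits,
 * and XFS_LSN_CMP() compares cycle first, then block, so "lower LSN" means
 * "older in the log". (See the LSN definitions in the log headers for the
 * authoritative details.)
 */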

/*
 * Return a pointer to the first item in the AIL. If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail	*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL. If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
	struct xfs_ail	*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL. If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	if (lip->li_ail.next == &ailp->xa_ail)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log. This is exactly the LSN of the first item in the AIL. If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * AIL traversal cursor initialisation.
 *
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 *
 * We don't link the push cursor because it is embedded in the struct
 * xfs_ail and hence easily findable.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	if (cur == &ailp->xa_cursors)
		return;

	cur->next = ailp->xa_cursors.next;
	ailp->xa_cursors.next = cur;
}
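
/*
 * Typical use of the cursor API (a condensed sketch of the pattern that
 * xfs_ail_worker() below follows; not an additional code path):
 *
 *	spin_lock(&ailp->xa_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, cur, lsn);
 *	while (lip) {
 *		... process lip, possibly dropping xa_lock ...
 *		lip = xfs_trans_ail_cursor_next(ailp, cur);
 *	}
 *	xfs_trans_ail_cursor_done(ailp, cur);
 *	spin_unlock(&ailp->xa_lock);
 */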

/*
 * Set the cursor to the next item, because when we look
 * up the cursor the current item may have been freed.
 */
STATIC void
xfs_trans_ail_cursor_set(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	*lip)
{
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
}

/*
 * Get the next item in the traversal and advance the cursor. If the cursor
 * was invalidated (indicated by the low bit of the item pointer being set),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}

/*
 * Now that the traversal is complete, we need to remove the cursor
 * from the list of traversing cursors. Avoid removing the embedded
 * push cursor, but use the fact it is always present to make the
 * list deletion simple.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*done)
{
	struct xfs_ail_cursor	*prev = NULL;
	struct xfs_ail_cursor	*cur;

	done->item = NULL;
	if (done == &ailp->xa_cursors)
		return;
	prev = &ailp->xa_cursors;
	for (cur = prev->next; cur; prev = cur, cur = prev->next) {
		if (cur == done) {
			prev->next = cur->next;
			break;
		}
	}
	ASSERT(cur);
}

/*
 * Invalidate any cursor that is pointing to this item. This is
 * called when an item is removed from the AIL. Any cursor pointing
 * to this object is now invalid and the traversal needs to be
 * terminated so it doesn't reference a freed object. We set the
 * low bit of the cursor item pointer so we can distinguish between
 * an invalidation and the end of the list when getting the next item
 * from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	/* need to search all cursors */
	for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}
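
/*
 * Worked example of the invalidation handshake: if a cursor currently
 * points at item X and X is deleted from the AIL, the loop above turns the
 * cursor's item pointer into (X | 1). The next call to
 * xfs_trans_ail_cursor_next() sees the low bit set and restarts the walk
 * from xfs_ail_min() rather than dereferencing the stale pointer.
 */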

/*
 * Initialise the cursor and return the first item in the AIL with an LSN at
 * or beyond the given lsn, or the first item in the AIL if lsn is zero.
 * Returns NULL if no such item exists.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_ail_min(ailp);
	if (lsn == 0)
		goto out;

	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	lip = NULL;
out:
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}

/*
 * Splice the log item list into the AIL at the given LSN.
 */
static void
xfs_ail_splice(
	struct xfs_ail	*ailp,
	struct list_head *list,
	xfs_lsn_t	lsn)
{
	xfs_log_item_t	*next_lip;

	/* If the AIL is empty, just splice the whole list in. */
	if (list_empty(&ailp->xa_ail)) {
		list_splice(list, &ailp->xa_ail);
		return;
	}

	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
			break;
	}

	ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
	       XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);

	list_splice_init(list, &next_lip->li_ail);
}
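
/*
 * Worked example: with AIL LSNs [10, 20, 20, 30] and a splice at lsn 20,
 * the reverse walk stops at the last existing item with an LSN <= 20, so
 * the new items land between the second 20 and the 30. Items that share an
 * LSN therefore stay in arrival order.
 */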

/*
 * Delete the given item from the AIL, and invalidate any traversal cursors
 * that point at it.
 */
static void
xfs_ail_delete(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
 * to run at a later time if there is more work to do to complete the push.
 */
STATIC void
xfs_ail_worker(
	struct work_struct *work)
{
	struct xfs_ail	*ailp = container_of(to_delayed_work(work),
					struct xfs_ail, xa_work);
	xfs_mount_t	*mp = ailp->xa_mount;
	struct xfs_ail_cursor *cur = &ailp->xa_cursors;
	xfs_log_item_t	*lip;
	xfs_lsn_t	lsn;
	xfs_lsn_t	target = ailp->xa_target;
	long		tout = 10;
	int		flush_log = 0;
	int		stuck = 0;
	int		count = 0;
	int		push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * AIL is empty or our push has reached the end.
		 */
		xfs_trans_ail_cursor_done(ailp, cur);
		spin_unlock(&ailp->xa_lock);
		goto out_done;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out. We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait
	 * for a reduced timeout to fire before pushing further. This
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;
		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item. Then re-grab the AIL lock so we
		 * can look for the next item on the AIL. List changes are
		 * handled by the AIL lookup functions internally.
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged. In
		 * any of these cases it is being taken care of and we can
		 * just skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&ailp->xa_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			ailp->xa_last_pushed_lsn = lsn;
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			ailp->xa_last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&ailp->xa_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done, i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, cur);
	spin_unlock(&ailp->xa_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	/* assume we have more work to do in a short while */
out_done:
	if (!count) {
		/* We're past our target or empty, so idle */
		ailp->xa_last_pushed_lsn = 0;

		/*
		 * Check for an updated push target before clearing the
		 * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
		 * work to do. Wait a bit longer before starting that work.
		 */
		smp_rmb();
		if (ailp->xa_target == target) {
			clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
			return;
		}
		tout = 50;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->xa_last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we
		 * are stuck due to operations in progress. "Stuck" in this
		 * case is defined as >90% of the items we tried to push
		 * being stuck.
		 *
		 * Back off a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout = 20;
	}

	/* There is more to do, requeue us. */
	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
					msecs_to_jiffies(tout));
}
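
/*
 * Summary of the requeue delays chosen above (in milliseconds): 10 is the
 * default between scans; 20 backs off when more than 90% of the items we
 * tried to push were stuck; 50 gives in-flight I/O time to complete once
 * the target has been reached, or when a new target arrived just as we
 * were about to go idle.
 */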

/*
 * This routine is called to move the tail of the AIL forward. It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a workqueue, which means the caller needs
 * to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before it clears
	 * the XFS_AIL_PUSHING_BIT.
	 */
	smp_wmb();
	ailp->xa_target = threshold_lsn;
	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}
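
/*
 * Note on the barriers: the smp_wmb() above pairs with the smp_rmb() in
 * xfs_ail_worker(). The worker re-reads xa_target after its scan, so a
 * target published here is intended to be observed before the worker
 * clears XFS_AIL_PUSHING_BIT and goes idle.
 */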

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * This is to be called when an item is unlocked that may have
 * been in the AIL. It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have
	 * unlocked log items arbitrarily. The last thing
	 * we want to do is to move the tail of the log
	 * over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min()
	 * without holding the AIL lock because we only care about the
	 * case where we are at the tail of the AIL. If the object isn't
	 * at the tail, it doesn't matter what result we get back. This
	 * is slightly racy because since we were just unlocked, we could
	 * go to sleep between the call to xfs_ail_min and the call to
	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
	 * move us out of the tail of the AIL, and then we wake up. However,
	 * the call to xfs_log_move_tail() doesn't do anything if there's
	 * not enough free space to wake people up so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added. Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function holds the AIL lock across the update operations on all the
 * items in the array, rather than taking and dropping it once per item. As a
 * result, once we have the AIL lock, we need to check each log item's LSN to
 * confirm it actually needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (lip->li_flags & XFS_LI_IN_AIL) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			lip->li_flags |= XFS_LI_IN_AIL;
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	xfs_ail_splice(ailp, &tmp, lsn);

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so. This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip->li_lsn;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
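
/*
 * For a single item, callers go through a thin inline wrapper rather than
 * building a one-element array by hand. A sketch of that wrapper, assuming
 * the helper in xfs_trans_priv.h keeps this shape:
 *
 *	static inline void
 *	xfs_trans_ail_update(
 *		struct xfs_ail		*ailp,
 *		struct xfs_log_item	*lip,
 *		xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 *	{
 *		xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
 *	}
 */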

/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion. This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0. If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
			return;
		}

		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so. This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic. It is possible we've emptied the
	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip ? mlip->li_lsn : 0;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
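
/*
 * As with updates, a single-item delete is expected to go through an inline
 * wrapper rather than building a one-element array at each call site. A
 * sketch, assuming the xfs_trans_priv.h helper keeps this shape:
 *
 *	static inline void
 *	xfs_trans_ail_delete(
 *		struct xfs_ail		*ailp,
 *		struct xfs_log_item	*lip) __releases(ailp->xa_lock)
 *	{
 *		xfs_trans_ail_delete_bulk(ailp, &lip, 1);
 *	}
 */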

/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn. The base of the list is the
 * list_head embedded in the struct xfs_ail that hangs off the
 * xfs mount structure. The base is initialized with both pointers
 * pointing to the base. This case always needs to be distinguished,
 * because the base has no lsn to look at. We almost always insert
 * at the end of the list, so on inserts we search from the
 * end of the list to find where the new item belongs.
 */

/*
 * Initialize the doubly linked list to point only to itself.
 */
int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return ENOMEM;

	ailp->xa_mount = mp;
	INIT_LIST_HEAD(&ailp->xa_ail);
	spin_lock_init(&ailp->xa_lock);
	INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
	mp->m_ail = ailp;
	return 0;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	cancel_delayed_work_sync(&ailp->xa_work);
	kmem_free(ailp);
}