/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
/*
 * Lock order:
 *
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */
xfs_buftarg_t		*xfs_dqerror_target;
int			xfs_dqerror_mod = 33;

struct kmem_zone	*xfs_qm_dqtrxzone;
static struct kmem_zone	*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_other_class;
/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
        xfs_dquot_t     *dqp)
{
        ASSERT(list_empty(&dqp->q_lru));

        mutex_destroy(&dqp->q_qlock);
        kmem_zone_free(xfs_qm_dqzone, dqp);

        XFS_STATS_DEC(xs_qm_dquot);
}
/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
        xfs_mount_t             *mp,
        xfs_disk_dquot_t        *d)
{
        xfs_quotainfo_t         *q = mp->m_quotainfo;

        if (q->qi_bsoftlimit && !d->d_blk_softlimit)
                d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
        if (q->qi_bhardlimit && !d->d_blk_hardlimit)
                d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
        if (q->qi_isoftlimit && !d->d_ino_softlimit)
                d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
        if (q->qi_ihardlimit && !d->d_ino_hardlimit)
                d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
        if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
                d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
        if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
                d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
}
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement is off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
        xfs_mount_t             *mp,
        xfs_disk_dquot_t        *d)
{
        if (d->d_blk_hardlimit)
                ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
                       be64_to_cpu(d->d_blk_hardlimit));
        if (d->d_ino_hardlimit)
                ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
                       be64_to_cpu(d->d_ino_hardlimit));
        if (d->d_rtb_hardlimit)
                ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
                       be64_to_cpu(d->d_rtb_hardlimit));
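
        /*
         * For each of blocks, inodes and realtime blocks: if no timer is
         * running and a soft or hard limit has been exceeded, start the
         * timer; if a timer is running and the count is back at or under
         * both limits, clear it again.
         */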
        if (!d->d_btimer) {
                if ((d->d_blk_softlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_softlimit))) ||
                    (d->d_blk_hardlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_btimelimit);
                } else {
                        d->d_bwarns = 0;
                }
        } else {
                if ((!d->d_blk_softlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_softlimit))) &&
                    (!d->d_blk_hardlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = 0;
                }
        }

        if (!d->d_itimer) {
                if ((d->d_ino_softlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_softlimit))) ||
                    (d->d_ino_hardlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_itimelimit);
                } else {
                        d->d_iwarns = 0;
                }
        } else {
                if ((!d->d_ino_softlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_softlimit))) &&
                    (!d->d_ino_hardlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = 0;
                }
        }

        if (!d->d_rtbtimer) {
                if ((d->d_rtb_softlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_softlimit))) ||
                    (d->d_rtb_hardlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_rtbtimelimit);
                } else {
                        d->d_rtbwarns = 0;
                }
        } else {
                if ((!d->d_rtb_softlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_softlimit))) &&
                    (!d->d_rtb_hardlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = 0;
                }
        }
}
/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
        xfs_trans_t             *tp,
        xfs_mount_t             *mp,
        xfs_dqid_t              id,
        uint                    type,
        xfs_buf_t               *bp)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        xfs_dqblk_t             *d;
        int                     curid, i;

        ASSERT(xfs_buf_islocked(bp));

        d = bp->b_addr;

        /*
         * ID of the first dquot in the block - id's are zero based.
         */
        curid = id - (id % q->qi_dqperchunk);
        memset(d, 0, BBTOB(q->qi_dqchunklen));
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
                d->dd_diskdq.d_id = cpu_to_be32(curid);
                d->dd_diskdq.d_flags = type;
        }
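
        /*
         * Tag the buffer as a user, project or group dquot buffer so that
         * log recovery can tell which kind of dquots it contains, then log
         * the entire chunk.
         */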
        xfs_trans_dquot_buf(tp, bp,
                            (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
                            ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
                             XFS_BLF_GDQUOT_BUF)));
        xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
        xfs_trans_t     **tpp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *dqp,
        xfs_inode_t     *quotip,
        xfs_fileoff_t   offset_fsb,
        xfs_buf_t       **O_bpp)
{
        xfs_fsblock_t   firstblock;
        xfs_bmap_free_t flist;
        xfs_bmbt_irec_t map;
        int             nmaps, error, committed;
        xfs_buf_t       *bp;
        xfs_trans_t     *tp = *tpp;

        trace_xfs_dqalloc(dqp);

        /*
         * Initialize the bmap freelist prior to calling bmapi code.
         */
        xfs_bmap_init(&flist, &firstblock);
        xfs_ilock(quotip, XFS_ILOCK_EXCL);
        /*
         * Return if this type of quota is turned off while we didn't
         * have the inode lock.
         */
        if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
                xfs_iunlock(quotip, XFS_ILOCK_EXCL);
                return ESRCH;
        }

        xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
        nmaps = 1;
        error = xfs_bmapi_write(tp, quotip, offset_fsb,
                                XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
                                &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
                                &map, &nmaps, &flist);
        if (error)
                goto error0;
        ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(nmaps == 1);
        ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
               (map.br_startblock != HOLESTARTBLOCK));

        /*
         * Keep track of the blkno to save a lookup later
         */
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

        /* now we can just get the buffer (there's nothing to read yet) */
        bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
                               dqp->q_blkno,
                               mp->m_quotainfo->qi_dqchunklen,
                               0);

        error = xfs_buf_geterror(bp);
        if (error)
                goto error1;

        /*
         * Make a chunk of dquots out of this buffer and log
         * the entire thing.
         */
        xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
                              dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

        /*
         * xfs_bmap_finish() may commit the current transaction and
         * start a second transaction if the freelist is not empty.
         *
         * Since we still want to modify this buffer, we need to
         * ensure that the buffer is not released on commit of
         * the first transaction and ensure the buffer is added to the
         * second transaction.
         *
         * If there is only one transaction then don't stop the buffer
         * from being released when it commits later on.
         */
        xfs_trans_bhold(tp, bp);

        if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
                goto error1;
        }

        if (committed) {
                tp = *tpp;
                xfs_trans_bjoin(tp, bp);
        } else {
                xfs_trans_bhold_release(tp, bp);
        }

        *O_bpp = bp;
        return 0;

error1:
        xfs_bmap_cancel(&flist);
error0:
        xfs_iunlock(quotip, XFS_ILOCK_EXCL);

        return error;
}
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
        xfs_trans_t             **tpp,
        xfs_dquot_t             *dqp,
        xfs_disk_dquot_t        **O_ddpp,
        xfs_buf_t               **O_bpp,
        uint                    flags)
{
        xfs_bmbt_irec_t map;
        int             nmaps = 1, error;
        xfs_buf_t       *bp;
        xfs_inode_t     *quotip = XFS_DQ_TO_QIP(dqp);
        xfs_mount_t     *mp = dqp->q_mount;
        xfs_disk_dquot_t *ddq;
        xfs_dqid_t      id = be32_to_cpu(dqp->q_core.d_id);
        xfs_trans_t     *tp = (tpp ? *tpp : NULL);
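
        /*
         * Dquots are laid out in fixed-size chunks in the quota inode, so
         * the file offset of this dquot is its id divided by the number of
         * dquots per chunk.
         */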
        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

        xfs_ilock(quotip, XFS_ILOCK_SHARED);
        if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
                /*
                 * Return if this type of quotas is turned off while we
                 * didn't have the quota inode lock.
                 */
                xfs_iunlock(quotip, XFS_ILOCK_SHARED);
                return ESRCH;
        }

        /*
         * Find the block map; no allocations yet
         */
        error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
                               XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

        xfs_iunlock(quotip, XFS_ILOCK_SHARED);
        if (error)
                return error;

        ASSERT(nmaps == 1);
        ASSERT(map.br_blockcount == 1);

        /*
         * Offset of dquot in the (fixed sized) dquot chunk.
         */
        dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
                sizeof(xfs_dqblk_t);

        ASSERT(map.br_startblock != DELAYSTARTBLOCK);
        if (map.br_startblock == HOLESTARTBLOCK) {
                /*
                 * We don't allocate unless we're asked to
                 */
                if (!(flags & XFS_QMOPT_DQALLOC))
                        return ENOENT;

                error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
                                        dqp->q_fileoffset, &bp);
                if (error)
                        return error;
                tp = *tpp;
        } else {
                trace_xfs_dqtobp_read(dqp);

                /*
                 * store the blkno etc so that we don't have to do the
                 * mapping all the time
                 */
                dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

                error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
                                           dqp->q_blkno,
                                           mp->m_quotainfo->qi_dqchunklen,
                                           0, &bp);
                if (error || !bp)
                        return XFS_ERROR(error);
        }

        ASSERT(xfs_buf_islocked(bp));

        /*
         * calculate the location of the dquot inside the buffer.
         */
        ddq = bp->b_addr + dqp->q_bufoffset;

        /*
         * A simple sanity check in case we got a corrupted dquot...
         */
        error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
                           flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
                           "dqtobp");
        if (error) {
                if (!(flags & XFS_QMOPT_DQREPAIR)) {
                        xfs_trans_brelse(tp, bp);
                        return XFS_ERROR(EIO);
                }
        }

        *O_bpp = bp;
        *O_ddpp = ddq;
        return 0;
}
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        uint                    flags,
        struct xfs_dquot        **O_dqpp)
{
        struct xfs_dquot        *dqp;
        struct xfs_disk_dquot   *ddqp;
        struct xfs_buf          *bp;
        struct xfs_trans        *tp = NULL;
        int                     error;
        int                     cancelflags = 0;

        dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

        dqp->dq_flags = type;
        dqp->q_core.d_id = cpu_to_be32(id);
        dqp->q_mount = mp;
        INIT_LIST_HEAD(&dqp->q_lru);
        mutex_init(&dqp->q_qlock);
        init_waitqueue_head(&dqp->q_pinwait);

        /*
         * Because we want to use a counting completion, complete
         * the flush completion once to allow a single access to
         * the flush completion without blocking.
         */
        init_completion(&dqp->q_flush);
        complete(&dqp->q_flush);

        /*
         * Make sure group quotas have a different lock class than user
         * quotas, so that lockdep knows they can be locked nested.
         */
        if (!(type & XFS_DQ_USER))
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

        XFS_STATS_INC(xs_qm_dquot);

        trace_xfs_dqread(dqp);
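
        /*
         * A transaction is only needed when we may have to allocate the
         * dquot's block on disk (XFS_QMOPT_DQALLOC); reserve the log and
         * disk space for that allocation up front.
         */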
        if (flags & XFS_QMOPT_DQALLOC) {
                tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
                error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
                                XFS_WRITE_LOG_RES(mp) +
                                /*
                                 * Round the chunklen up to the next multiple
                                 * of 128 (buf log item chunk size)).
                                 */
                                BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
                                0,
                                XFS_TRANS_PERM_LOG_RES,
                                XFS_WRITE_LOG_COUNT);
                if (error)
                        goto error1;
                cancelflags = XFS_TRANS_RELEASE_LOG_RES;
        }

        /*
         * get a pointer to the on-disk dquot and the buffer containing it
         * dqp already knows its own type (GROUP/USER).
         */
        error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
        if (error) {
                /*
                 * This can happen if quotas got turned off (ESRCH),
                 * or if the dquot didn't exist on disk and we ask to
                 * allocate (ENOENT).
                 */
                trace_xfs_dqread_fail(dqp);
                cancelflags |= XFS_TRANS_ABORT;
                goto error1;
        }

        /* copy everything from disk dquot to the incore dquot */
        memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
        xfs_qm_dquot_logitem_init(dqp);

        /*
         * Reservation counters are defined as reservation plus current usage
         * to avoid having to add every time.
         */
        dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
        dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
        dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

        /* Mark the buf so that this will stay incore a little longer */
        xfs_buf_set_ref(bp, XFS_DQUOT_REF);

        /*
         * We got the buffer with a xfs_trans_read_buf() (in dqtobp()),
         * so we need to release it with xfs_trans_brelse().
         * The strategy here is identical to that of inodes; we lock
         * the dquot in xfs_qm_dqget() before making it accessible to
         * others. This is because dquots, like inodes, need a good level of
         * concurrency, and we don't want to take locks on the entire buffers
         * for dquot accesses.
         * Note also that the dquot buffer may even be dirty at this point, if
         * this particular dquot was repaired. We still aren't afraid to
         * brelse it because we have the changes incore.
         */
        ASSERT(xfs_buf_islocked(bp));
        xfs_trans_brelse(tp, bp);

        if (tp) {
                error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
                if (error)
                        goto error0;
        }

        *O_dqpp = dqp;
        return error;

error1:
        if (tp)
                xfs_trans_cancel(tp, cancelflags);
error0:
        xfs_qm_dqdestroy(dqp);
        *O_dqpp = NULL;
        return error;
}
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,      /* locked inode (optional) */
        xfs_dqid_t      id,       /* uid/projid/gid depending on type */
        uint            type,     /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
        uint            flags,    /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
        xfs_dquot_t     **O_dqpp) /* OUT : locked incore dquot */
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = XFS_DQUOT_TREE(qi, type);
        struct xfs_dquot        *dqp;
        int                     error;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
        if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
            (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
            (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
                return ESRCH;
        }

        if (xfs_do_dqerror) {
                if ((xfs_dqerror_target == mp->m_ddev_targp) &&
                    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
                        xfs_debug(mp, "Returning error in dqget");
                        return EIO;
                }
        }

        ASSERT(type == XFS_DQ_USER ||
               type == XFS_DQ_PROJ ||
               type == XFS_DQ_GROUP);
        if (ip) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(xfs_inode_dquot(ip, type) == NULL);
        }
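
        /*
         * Look the id up in the per-type radix tree under qi_tree_lock.
         * A dquot that is being freed is skipped and the lookup retried;
         * otherwise a cache hit is returned directly and a miss falls
         * through to read the dquot in from disk.
         */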
restart:
        mutex_lock(&qi->qi_tree_lock);
        dqp = radix_tree_lookup(tree, id);
        if (dqp) {
                xfs_dqlock(dqp);
                if (dqp->dq_flags & XFS_DQ_FREEING) {
                        xfs_dqunlock(dqp);
                        mutex_unlock(&qi->qi_tree_lock);
                        trace_xfs_dqget_freeing(dqp);
                        delay(1);
                        goto restart;
                }

                dqp->q_nrefs++;
                mutex_unlock(&qi->qi_tree_lock);

                trace_xfs_dqget_hit(dqp);
                XFS_STATS_INC(xs_qm_dqcachehits);
                *O_dqpp = dqp;
                return 0;
        }
        mutex_unlock(&qi->qi_tree_lock);
        XFS_STATS_INC(xs_qm_dqcachemisses);

        /*
         * Dquot cache miss. We don't want to keep the inode lock across
         * a (potential) disk read. Also we don't want to deal with the lock
         * ordering between quotainode and this inode. OTOH, dropping the inode
         * lock here means dealing with a chown that can happen before
         * we re-acquire the lock.
         */
        if (ip)
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

        error = xfs_qm_dqread(mp, id, type, flags, &dqp);

        if (ip)
                xfs_ilock(ip, XFS_ILOCK_EXCL);

        if (error)
                return error;

        if (ip) {
                /*
                 * A dquot could be attached to this inode by now, since
                 * we had dropped the ilock.
                 */
                if (xfs_this_quota_on(mp, type)) {
                        struct xfs_dquot        *dqp1;

                        dqp1 = xfs_inode_dquot(ip, type);
                        if (dqp1) {
                                xfs_qm_dqdestroy(dqp);
                                dqp = dqp1;
                                xfs_dqlock(dqp);
                                goto dqret;
                        }
                } else {
                        /* inode stays locked on return */
                        xfs_qm_dqdestroy(dqp);
                        return XFS_ERROR(ESRCH);
                }
        }
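
        /*
         * Insert the new dquot into the radix tree under qi_tree_lock.  If
         * another thread raced with us and already inserted one for the
         * same id, throw ours away and retry the lookup.
         */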
        mutex_lock(&qi->qi_tree_lock);
        error = -radix_tree_insert(tree, id, dqp);
        if (unlikely(error)) {
                WARN_ON(error != EEXIST);

                /*
                 * Duplicate found. Just throw away the new dquot and start
                 * over.
                 */
                mutex_unlock(&qi->qi_tree_lock);
                trace_xfs_dqget_dup(dqp);
                xfs_qm_dqdestroy(dqp);
                XFS_STATS_INC(xs_qm_dquot_dups);
                goto restart;
        }

        /*
         * We return a locked dquot to the caller, with a reference taken
         */
        xfs_dqlock(dqp);
        dqp->q_nrefs = 1;

        mutex_unlock(&qi->qi_tree_lock);

dqret:
        ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
        trace_xfs_dqget_miss(dqp);
        *O_dqpp = dqp;
        return 0;
}
STATIC void
xfs_qm_dqput_final(
        struct xfs_dquot        *dqp)
{
        struct xfs_quotainfo    *qi = dqp->q_mount->m_quotainfo;
        struct xfs_dquot        *gdqp;

        trace_xfs_dqput_free(dqp);

        mutex_lock(&qi->qi_lru_lock);
        if (list_empty(&dqp->q_lru)) {
                list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
                qi->qi_lru_count++;
                XFS_STATS_INC(xs_qm_dquot_unused);
        }
        mutex_unlock(&qi->qi_lru_lock);

        /*
         * If we just added a udquot to the freelist, then we want to release
         * the gdquot reference that it (probably) has. Otherwise it'll keep
         * the gdquot from getting reclaimed.
         */
        gdqp = dqp->q_gdquot;
        if (gdqp) {
                xfs_dqlock(gdqp);
                dqp->q_gdquot = NULL;
        }
        xfs_dqunlock(dqp);

        /*
         * If we had a group quota hint, release it now.
         */
        if (gdqp)
                xfs_qm_dqput(gdqp);
}
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
        struct xfs_dquot        *dqp)
{
        ASSERT(dqp->q_nrefs > 0);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        trace_xfs_dqput(dqp);

        if (--dqp->q_nrefs > 0)
                xfs_dqunlock(dqp);
        else
                xfs_qm_dqput_final(dqp);
}
/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
        xfs_dquot_t     *dqp)
{
        if (!dqp)
                return;

        trace_xfs_dqrele(dqp);

        xfs_dqlock(dqp);
        /*
         * We don't care to flush it if the dquot is dirty here.
         * That will create stutters that we want to avoid.
         * Instead we do a delayed write when we try to reclaim
         * a dirty dquot. Also xfs_sync will take part of the burden...
         */
        xfs_qm_dqput(dqp);
}
/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        xfs_dq_logitem_t        *qip = (struct xfs_dq_logitem *)lip;
        xfs_dquot_t             *dqp = qip->qli_dquot;
        struct xfs_ail          *ailp = lip->li_ailp;

        /*
         * We only want to pull the item from the AIL if its
         * location in the log has not changed since we started the flush.
         * Thus, we only bother if the dquot's lsn has
         * not changed. First we check the lsn outside the lock
         * since it's cheaper, and then we recheck while
         * holding the lock before removing the dquot from the AIL.
         */
        if ((lip->li_flags & XFS_LI_IN_AIL) &&
            lip->li_lsn == qip->qli_flush_lsn) {

                /* xfs_trans_ail_delete() drops the AIL lock. */
                spin_lock(&ailp->xa_lock);
                if (lip->li_lsn == qip->qli_flush_lsn)
                        xfs_trans_ail_delete(ailp, lip);
                else
                        spin_unlock(&ailp->xa_lock);
        }

        /*
         * Release the dq's flush lock since we're done with it.
         */
        xfs_dqfunlock(dqp);
}
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
        xfs_dquot_t             *dqp,
        uint                    flags)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_buf          *bp;
        struct xfs_disk_dquot   *ddqp;
        int                     error;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(!completion_done(&dqp->q_flush));

        trace_xfs_dqflush(dqp);

        /*
         * If not dirty, or it's pinned and we are not supposed to block, nada.
         */
        if (!XFS_DQ_IS_DIRTY(dqp) ||
            ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
                xfs_dqfunlock(dqp);
                return 0;
        }
        xfs_qm_dqunpin_wait(dqp);

        /*
         * This may have been unpinned because the filesystem is shutting
         * down forcibly. If that's the case we must not write this dquot
         * to disk, because the log record didn't make it to disk!
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                dqp->dq_flags &= ~XFS_DQ_DIRTY;
                xfs_dqfunlock(dqp);
                return XFS_ERROR(EIO);
        }

        /*
         * Get the buffer containing the on-disk dquot
         */
        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
                                   mp->m_quotainfo->qi_dqchunklen, 0, &bp);
        if (error) {
                ASSERT(error != ENOENT);
                xfs_dqfunlock(dqp);
                return error;
        }

        /*
         * Calculate the location of the dquot inside the buffer.
         */
        ddqp = bp->b_addr + dqp->q_bufoffset;

        /*
         * A simple sanity check in case we got a corrupted dquot..
         */
        error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
                           XFS_QMOPT_DOWARN, "dqflush (incore copy)");
        if (error) {
                xfs_buf_relse(bp);
                xfs_dqfunlock(dqp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return XFS_ERROR(EIO);
        }

        /* This is the only portion of data that needs to persist */
        memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

        /*
         * Clear the dirty field and remember the flush lsn for later use.
         */
        dqp->dq_flags &= ~XFS_DQ_DIRTY;

        xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
                                        &dqp->q_logitem.qli_item.li_lsn);

        /*
         * Attach an iodone routine so that we can remove this dquot from the
         * AIL and release the flush lock once the dquot is synced to disk.
         */
        xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
                                  &dqp->q_logitem.qli_item);

        /*
         * If the buffer is pinned then push on the log so we won't
         * get stuck waiting in the write for too long.
         */
        if (xfs_buf_ispinned(bp)) {
                trace_xfs_dqflush_force(dqp);
                xfs_log_force(mp, 0);
        }
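
        /*
         * A synchronous flush (SYNC_WAIT) writes the buffer out right away;
         * otherwise it is queued for delayed write.  Either way the buffer
         * reference is dropped before we return.
         */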
        if (flags & SYNC_WAIT)
                error = xfs_bwrite(bp);
        else
                xfs_buf_delwri_queue(bp);

        xfs_buf_relse(bp);

        trace_xfs_dqflush_done(dqp);

        /*
         * dqp is still locked, but caller is free to unlock it now.
         */
        return error;
}
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lowest id first.
 */
void
xfs_dqlock2(
        xfs_dquot_t     *d1,
        xfs_dquot_t     *d2)
{
        if (d1 && d2) {
                ASSERT(d1 != d2);
                if (be32_to_cpu(d1->q_core.d_id) >
                    be32_to_cpu(d2->q_core.d_id)) {
                        mutex_lock(&d2->q_qlock);
                        mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
                } else {
                        mutex_lock(&d1->q_qlock);
                        mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
                }
        } else if (d1) {
                mutex_lock(&d1->q_qlock);
        } else if (d2) {
                mutex_lock(&d2->q_qlock);
        }
}
/*
 * Give the buffer a little push if it is incore and
 * wait on the flush lock.
 */
void
xfs_dqflock_pushbuf_wait(
        xfs_dquot_t     *dqp)
{
        xfs_mount_t     *mp = dqp->q_mount;
        xfs_buf_t       *bp;

        /*
         * Check to see if the dquot has been flushed delayed
         * write.  If so, grab its buffer and send it
         * out immediately.  We'll be able to acquire
         * the flush lock when the I/O completes.
         */
        bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
                        mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
        if (!bp)
                goto out_lock;

        if (XFS_BUF_ISDELAYWRITE(bp)) {
                if (xfs_buf_ispinned(bp))
                        xfs_log_force(mp, 0);
                xfs_buf_delwri_promote(bp);
                wake_up_process(bp->b_target->bt_task);
        }
        xfs_buf_relse(bp);
out_lock:
        xfs_dqflock(dqp);
}
int __init
xfs_qm_init(void)
{
        xfs_qm_dqzone =
                kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
        if (!xfs_qm_dqzone)
                goto out;

        xfs_qm_dqtrxzone =
                kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
        if (!xfs_qm_dqtrxzone)
                goto out_free_dqzone;

        return 0;

out_free_dqzone:
        kmem_zone_destroy(xfs_qm_dqzone);
out:
        return -ENOMEM;
}

void
xfs_qm_exit(void)
{
        kmem_zone_destroy(xfs_qm_dqtrxzone);
        kmem_zone_destroy(xfs_qm_dqzone);
}