/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - copy also limits on subvol creation
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
        u64 qgroupid;

        /*
         * state
         */
        u64 rfer;       /* referenced */
        u64 rfer_cmpr;  /* referenced compressed */
        u64 excl;       /* exclusive */
        u64 excl_cmpr;  /* exclusive compressed */

        /*
         * limits
         */
        u64 lim_flags;  /* which limits are set */
        u64 max_rfer;
        u64 max_excl;
        u64 rsv_rfer;
        u64 rsv_excl;

        /*
         * reservation tracking
         */
        u64 reserved;

        /*
         * lists
         */
        struct list_head groups;  /* groups this group is member of */
        struct list_head members; /* groups that are members of this group */
        struct list_head dirty;   /* dirty groups */
        struct rb_node node;      /* tree of qgroups */

        /*
         * temp variables for accounting operations
         * Refer to qgroup_shared_accounting() for details.
         */
        u64 old_refcnt;
        u64 new_refcnt;
};
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->old_refcnt < seq)
                qg->old_refcnt = seq;
        qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->new_refcnt < seq)
                qg->new_refcnt = seq;
        qg->new_refcnt += mod;
}
static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->old_refcnt < seq)
                return 0;
        return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->new_refcnt < seq)
                return 0;
        return qg->new_refcnt - seq;
}
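
/*
 * Illustrative note (added, not from the original source): these helpers
 * avoid clearing old_refcnt/new_refcnt between accounting rounds. Each
 * round picks a fresh base 'seq'; any counter left over from an earlier
 * round is below 'seq' and therefore reads back as 0. Assuming seq == 100
 * at the start of a round:
 *
 *      btrfs_qgroup_update_old_refcnt(qg, 100, 1);     => old_refcnt == 101
 *      btrfs_qgroup_update_old_refcnt(qg, 100, 1);     => old_refcnt == 102
 *      btrfs_qgroup_get_old_refcnt(qg, 100);           => returns 2
 *
 * A later round using seq == 200 sees old_refcnt (102) < 200 and reads 0,
 * without any explicit reset of the counters.
 */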
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
        struct list_head next_group;
        struct list_head next_member;
        struct btrfs_qgroup *group;
        struct btrfs_qgroup *member;
};

#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
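
/*
 * Note (added for clarity): these casts exist because struct ulist stores
 * per-node auxiliary data as a u64. qgroup pointers are round-tripped
 * through that aux field by the ulist_add()/ulist_next() walks below, and
 * the intermediate uintptr_t cast keeps the conversion well-defined on
 * 32-bit builds.
 */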
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
                                           u64 qgroupid)
{
        struct rb_node *n = fs_info->qgroup_tree.rb_node;
        struct btrfs_qgroup *qgroup;

        while (n) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                if (qgroup->qgroupid < qgroupid)
                        n = n->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        n = n->rb_right;
                else
                        return qgroup;
        }
        return NULL;
}
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
                                          u64 qgroupid)
{
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;

        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);

                if (qgroup->qgroupid < qgroupid)
                        p = &(*p)->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        p = &(*p)->rb_right;
                else
                        return qgroup;
        }

        qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
        if (!qgroup)
                return ERR_PTR(-ENOMEM);

        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
        INIT_LIST_HEAD(&qgroup->dirty);

        rb_link_node(&qgroup->node, parent, p);
        rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

        return qgroup;
}
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
        struct btrfs_qgroup_list *list;

        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }

        while (!list_empty(&qgroup->members)) {
                list = list_first_entry(&qgroup->members,
                                        struct btrfs_qgroup_list, next_member);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }
        kfree(qgroup);
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

        if (!qgroup)
                return -ENOENT;

        rb_erase(&qgroup->node, &fs_info->qgroup_tree);
        __del_qgroup_rb(qgroup);
        return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list = kzalloc(sizeof(*list), GFP_ATOMIC);
        if (!list)
                return -ENOMEM;

        list->group = parent;
        list->member = member;
        list_add_tail(&list->next_group, &member->groups);
        list_add_tail(&list->next_member, &parent->members);

        return 0;
}
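
/*
 * Worked example (added for clarity): after add_relation_rb() links member
 * 0/257 under parent 1/100, the single btrfs_qgroup_list node is threaded
 * onto two lists at once: onto member->groups via next_group and onto
 * parent->members via next_member. The relation can therefore be walked
 * from either end without a second allocation.
 */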
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        list_del(&list->next_group);
                        list_del(&list->next_member);
                        kfree(list);
                        return 0;
                }
        }
        return -ENOENT;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
                               u64 rfer, u64 excl)
{
        struct btrfs_qgroup *qgroup;

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup)
                return -EINVAL;
        if (qgroup->rfer != rfer || qgroup->excl != excl)
                return -EINVAL;
        return 0;
}
#endif
/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *l;
        int slot;
        int ret = 0;
        u64 flags = 0;
        u64 rescan_progress = 0;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /* default this to quota off, in case no status key is found */
        fs_info->qgroup_flags = 0;

        /*
         * pass 1: read status, all qgroup infos and limits
         */
        key.objectid = 0;
        key.type = 0;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
        if (ret)
                goto out;

        while (1) {
                struct btrfs_qgroup *qgroup;

                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
                        struct btrfs_qgroup_status_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_status_item);

                        if (btrfs_qgroup_status_version(l, ptr) !=
                            BTRFS_QGROUP_STATUS_VERSION) {
                                btrfs_err(fs_info,
                                        "old qgroup version, quota disabled");
                                goto out;
                        }
                        if (btrfs_qgroup_status_generation(l, ptr) !=
                            fs_info->generation) {
                                flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                                btrfs_err(fs_info,
                                        "qgroup generation mismatch, marked as inconsistent");
                        }
                        fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
                                                                          ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }

                if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
                    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
                        goto next1;

                qgroup = find_qgroup_rb(fs_info, found_key.offset);
                if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
                    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
                        btrfs_err(fs_info, "inconsistent qgroup config");
                        flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                }
                if (!qgroup) {
                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out;
                        }
                }
                switch (found_key.type) {
                case BTRFS_QGROUP_INFO_KEY: {
                        struct btrfs_qgroup_info_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_info_item);
                        qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
                        qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
                        qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
                        qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
                        /* generation currently unused */
                        break;
                }
                case BTRFS_QGROUP_LIMIT_KEY: {
                        struct btrfs_qgroup_limit_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_limit_item);
                        qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
                        qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
                        qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
                        qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
                        qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
                        break;
                }
                }
next1:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        btrfs_release_path(path);

        /*
         * pass 2: read all qgroup relations
         */
        key.objectid = 0;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
        if (ret)
                goto out;
        while (1) {
                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
                        goto next2;

                if (found_key.objectid > found_key.offset) {
                        /* parent <- member, not needed to build config */
                        /* FIXME should we omit the key completely? */
                        goto next2;
                }

                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
                if (ret == -ENOENT) {
                        btrfs_warn(fs_info,
                                "orphan qgroup relation 0x%llx->0x%llx",
                                found_key.objectid, found_key.offset);
                        ret = 0;        /* ignore the error */
                }
                if (ret)
                        goto out;
next2:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
out:
        fs_info->qgroup_flags |= flags;
        if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
                clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
                 ret >= 0)
                ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
        btrfs_free_path(path);

        if (ret < 0) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        return ret < 0 ? ret : 0;
}
/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * first two are in single-threaded paths. And for the third one, we have set
 * quota_root to be null with qgroup_lock held before, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
        }
        /*
         * we call btrfs_free_qgroup_config() when umounting
         * filesystem and disabling quota, so we set qgroup_ulist
         * to be null here to avoid double free.
         */
        ulist_free(fs_info->qgroup_ulist);
        fs_info->qgroup_ulist = NULL;
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_free_path(path);
        return ret;
}
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
out:
        btrfs_free_path(path);
        return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_qgroup_info_item *qgroup_info;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        if (btrfs_is_testing(quota_root->fs_info))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;

        /*
         * Avoid a transaction abort by catching -EEXIST here. In that
         * case, we proceed by re-initializing the existing structure
         * on disk.
         */

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_info));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_limit));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
        if (ret)
                goto out;

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);

out:
        btrfs_free_path(path);
        return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_LIMIT_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
        btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
        btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
        btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
        btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_info_item *qgroup_info;
        int ret;
        int slot;

        if (btrfs_is_testing(root->fs_info))
                return 0;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
        btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
        btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
        btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_status_item *ptr;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
        btrfs_set_qgroup_status_rescan(l, ptr,
                                fs_info->qgroup_rescan_progress.objectid);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf = NULL;
        int ret;
        int nr = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                nr = btrfs_header_nritems(leaf);
                if (!nr)
                        break;
                /*
                 * delete the leaf one by one
                 * since the whole tree is going
                 * to be deleted.
                 */
                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }
        ret = 0;
out:
        set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
        btrfs_free_path(path);
        return ret;
}
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path = NULL;
        struct btrfs_qgroup_status_item *ptr;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
        int ret = 0;
        int slot;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (fs_info->quota_root) {
                set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
                goto out;
        }

        fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * initially create the quota tree
         */
        quota_root = btrfs_create_tree(trans, fs_info,
                                       BTRFS_QUOTA_TREE_OBJECTID);
        if (IS_ERR(quota_root)) {
                ret = PTR_ERR(quota_root);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out_free_root;
        }

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret)
                goto out_free_path;

        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
        fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
                                BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

        btrfs_mark_buffer_dirty(leaf);

        key.objectid = 0;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = 0;

        btrfs_release_path(path);
        ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
        if (ret > 0)
                goto out_add_root;
        if (ret < 0)
                goto out_free_path;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.type == BTRFS_ROOT_REF_KEY) {
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret)
                                goto out_free_path;

                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out_free_path;
                        }
                }
                ret = btrfs_next_item(tree_root, path);
                if (ret < 0)
                        goto out_free_path;
                if (ret)
                        break;
        }

out_add_root:
        btrfs_release_path(path);
        ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
        if (ret)
                goto out_free_path;

        qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
        if (IS_ERR(qgroup)) {
                ret = PTR_ERR(qgroup);
                goto out_free_path;
        }
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
        spin_unlock(&fs_info->qgroup_lock);
out_free_path:
        btrfs_free_path(path);
out_free_root:
        if (ret) {
                free_extent_buffer(quota_root->node);
                free_extent_buffer(quota_root->commit_root);
                kfree(quota_root);
        }
out:
        if (ret) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
        }
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *quota_root;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        btrfs_free_qgroup_config(fs_info);

        ret = btrfs_clean_quota_tree(trans, quota_root);
        if (ret)
                goto out;

        ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
        if (ret)
                goto out;

        list_del(&quota_root->dirty_list);

        btrfs_tree_lock(quota_root->node);
        clean_tree_block(trans, tree_root->fs_info, quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

        free_extent_buffer(quota_root->node);
        free_extent_buffer(quota_root->commit_root);
        kfree(quota_root);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
                         struct btrfs_qgroup *qgroup)
{
        if (list_empty(&qgroup->dirty))
                list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
/*
 * The easy accounting, if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                    struct ulist *tmp, u64 ref_root,
                                    u64 num_bytes, int sign)
{
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *glist;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        int ret = 0;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        qgroup->rfer += sign * num_bytes;
        qgroup->rfer_cmpr += sign * num_bytes;

        WARN_ON(sign < 0 && qgroup->excl < num_bytes);
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;
        if (sign > 0)
                qgroup->reserved -= num_bytes;

        qgroup_dirty(fs_info, qgroup);

        /* Get all of the parent groups that contain this qgroup */
        list_for_each_entry(glist, &qgroup->groups, next_group) {
                ret = ulist_add(tmp, glist->group->qgroupid,
                                ptr_to_u64(glist->group), GFP_ATOMIC);
                if (ret < 0)
                        goto out;
        }

        /* Iterate all of the parents and adjust their reference counts */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                qgroup = u64_to_ptr(unode->aux);
                qgroup->rfer += sign * num_bytes;
                qgroup->rfer_cmpr += sign * num_bytes;
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0)
                        qgroup->reserved -= num_bytes;
                qgroup->excl_cmpr += sign * num_bytes;
                qgroup_dirty(fs_info, qgroup);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qgroup->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        ptr_to_u64(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
out:
        return ret;
}
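
/*
 * Worked example (added for clarity): assume qgroup 0/257 is a member of
 * 1/100, and an extent of 16KiB gains its only ref in 0/257 (sign = +1).
 * Then rfer and excl of 0/257 both grow by 16KiB, and the ulist walk above
 * propagates the same +16KiB rfer/excl to 1/100 and any of its ancestors,
 * each exactly once, since ulist_add() deduplicates by qgroupid.
 */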
/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parent qgroups is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 when a full rescan is needed
 * and the INCONSISTENT flag has been set.
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
                                   struct ulist *tmp, u64 src, u64 dst,
                                   int sign)
{
        struct btrfs_qgroup *qgroup;
        int ret = 1;
        int err = 0;

        qgroup = find_qgroup_rb(fs_info, src);
        if (!qgroup)
                goto out;
        if (qgroup->excl == qgroup->rfer) {
                ret = 0;
                err = __qgroup_excl_accounting(fs_info, tmp, dst,
                                               qgroup->excl, sign);
                if (err < 0) {
                        ret = err;
                        goto out;
                }
        }
out:
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        return ret;
}
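
/*
 * Design note (added for clarity): excl == rfer means every byte the source
 * qgroup references is exclusively owned by it, so attaching or detaching it
 * from a parent can be accounted by simply adding or subtracting
 * qgroup->excl on all ancestors. As soon as any byte is shared, per-extent
 * ownership is unknown here and only a rescan produces correct numbers.
 */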
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;

        /* Check the level of src and dst first */
        if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
                return -EINVAL;

        tmp = ulist_alloc(GFP_NOFS);
        if (!tmp)
                return -ENOMEM;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* check if such a qgroup relation exists first */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        ret = add_qgroup_relation_item(trans, quota_root, src, dst);
        if (ret)
                goto out;

        ret = add_qgroup_relation_item(trans, quota_root, dst, src);
        if (ret) {
                del_qgroup_relation_item(trans, quota_root, src, dst);
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        ret = add_relation_rb(quota_root->fs_info, src, dst);
        if (ret < 0) {
                spin_unlock(&fs_info->qgroup_lock);
                goto out;
        }
        ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        ulist_free(tmp);
        return ret;
}
int __del_qgroup_relation(struct btrfs_trans_handle *trans,
                          struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;
        int err;

        tmp = ulist_alloc(GFP_NOFS);
        if (!tmp)
                return -ENOMEM;

        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* check if such a qgroup relation exists first */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent)
                        goto exist;
        }
        ret = -ENOENT;
        goto out;
exist:
        ret = del_qgroup_relation_item(trans, quota_root, src, dst);
        err = del_qgroup_relation_item(trans, quota_root, dst, src);
        if (err && !ret)
                ret = err;

        spin_lock(&fs_info->qgroup_lock);
        del_relation_rb(fs_info, src, dst);
        ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        ulist_free(tmp);
        return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        ret = __del_qgroup_relation(trans, fs_info, src, dst);
        mutex_unlock(&fs_info->qgroup_ioctl_lock);

        return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (qgroup) {
                ret = -EEXIST;
                goto out;
        }

        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = add_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);

        if (IS_ERR(qgroup))
                ret = PTR_ERR(qgroup);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *list;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        } else {
                /* check if there are no children of this qgroup */
                if (!list_empty(&qgroup->members)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        ret = del_qgroup_item(trans, quota_root, qgroupid);

        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                ret = __del_qgroup_relation(trans, fs_info,
                                            qgroupid,
                                            list->group->qgroupid);
                if (ret)
                        goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(quota_root->fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info, u64 qgroupid,
                       struct btrfs_qgroup_limit *limit)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;
        /* Sometimes we would want to clear the limit on this qgroup.
         * To meet this requirement, we treat the -1 as a special value
         * which tells the kernel to clear the limit on this qgroup.
         */
        const u64 CLEAR_VALUE = -1;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
                if (limit->max_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        qgroup->max_rfer = 0;
                } else {
                        qgroup->max_rfer = limit->max_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
                if (limit->max_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        qgroup->max_excl = 0;
                } else {
                        qgroup->max_excl = limit->max_excl;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
                if (limit->rsv_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        qgroup->rsv_rfer = 0;
                } else {
                        qgroup->rsv_rfer = limit->rsv_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
                if (limit->rsv_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        qgroup->rsv_excl = 0;
                } else {
                        qgroup->rsv_excl = limit->rsv_excl;
                }
        }
        qgroup->lim_flags |= limit->flags;

        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_limit_item(trans, quota_root, qgroup);
        if (ret) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_info(fs_info, "unable to update quota limit for %llu",
                           qgroupid);
        }

out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
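
/*
 * Usage note (added for clarity, values illustrative): to clear only a
 * previously configured max_rfer limit, userspace passes a limit with
 * BTRFS_QGROUP_LIMIT_MAX_RFER set in flags and max_rfer == (u64)-1. The
 * branch above then drops the flag from qgroup->lim_flags and zeroes the
 * stored limit, instead of treating -1 as a 16 EiB limit.
 */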
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_qgroup_extent_record *record;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        u64 qgroup_to_skip;
        int ret = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        qgroup_to_skip = delayed_refs->qgroup_to_skip;

        /*
         * No need to do lock, since this function will only be called in
         * btrfs_commit_transaction().
         */
        node = rb_first(&delayed_refs->dirty_extent_root);
        while (node) {
                record = rb_entry(node, struct btrfs_qgroup_extent_record,
                                  node);
                ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0,
                                           &record->old_roots);
                if (ret < 0)
                        break;
                if (qgroup_to_skip)
                        ulist_del(record->old_roots, qgroup_to_skip, 0);
                node = rb_next(node);
        }

        return ret;
}
int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
                                struct btrfs_delayed_ref_root *delayed_refs,
                                struct btrfs_qgroup_extent_record *record)
{
        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_qgroup_extent_record *entry;
        u64 bytenr = record->bytenr;

        assert_spin_locked(&delayed_refs->lock);
        trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
                                 node);
                if (bytenr < entry->bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->bytenr)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }

        rb_link_node(&record->node, parent_node, p);
        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
        return 0;
}
int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
                struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
                gfp_t gfp_flag)
{
        struct btrfs_qgroup_extent_record *record;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
            || bytenr == 0 || num_bytes == 0)
                return 0;
        if (WARN_ON(trans == NULL))
                return -EINVAL;
        record = kmalloc(sizeof(*record), gfp_flag);
        if (!record)
                return -ENOMEM;

        delayed_refs = &trans->transaction->delayed_refs;
        record->bytenr = bytenr;
        record->num_bytes = num_bytes;
        record->old_roots = NULL;

        spin_lock(&delayed_refs->lock);
        ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
                                                      record);
        spin_unlock(&delayed_refs->lock);
        if (ret > 0)
                kfree(record);
        return 0;
}
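
/*
 * Lifecycle note (added for clarity): a dirty extent record inserted here
 * lives until btrfs_qgroup_account_extents() consumes it at commit time.
 * A return of 1 from the nolock helper means a record for the same bytenr
 * already exists, so the duplicate is freed and the insert is a no-op.
 */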
#define UPDATE_NEW      0
#define UPDATE_OLD      1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
                                struct ulist *roots, struct ulist *tmp,
                                struct ulist *qgroups, u64 seq, int update_old)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct ulist_node *tmp_unode;
        struct ulist_iterator tmp_uiter;
        struct btrfs_qgroup *qg;
        int ret = 0;

        if (!roots)
                return 0;
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(roots, &uiter))) {
                qg = find_qgroup_rb(fs_info, unode->val);
                if (!qg)
                        continue;

                ulist_reinit(tmp);
                ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
                                GFP_ATOMIC);
                if (ret < 0)
                        return ret;
                ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
                if (ret < 0)
                        return ret;
                ULIST_ITER_INIT(&tmp_uiter);
                while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
                        struct btrfs_qgroup_list *glist;

                        qg = u64_to_ptr(tmp_unode->aux);
                        if (update_old)
                                btrfs_qgroup_update_old_refcnt(qg, seq, 1);
                        else
                                btrfs_qgroup_update_new_refcnt(qg, seq, 1);
                        list_for_each_entry(glist, &qg->groups, next_group) {
                                ret = ulist_add(qgroups, glist->group->qgroupid,
                                                ptr_to_u64(glist->group),
                                                GFP_ATOMIC);
                                if (ret < 0)
                                        return ret;
                                ret = ulist_add(tmp, glist->group->qgroupid,
                                                ptr_to_u64(glist->group),
                                                GFP_ATOMIC);
                                if (ret < 0)
                                        return ret;
                        }
                }
        }
        return 0;
}
/*
 * Update qgroup rfer/excl counters.
 * Rfer update is easy, the code can explain itself.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *      |       A       |       !A      |
 *  -------------------------------------
 *  B   |       *       |       -       |
 *  -------------------------------------
 *  !B  |       +       |       **      |
 *  -------------------------------------
 *
 * Conditions:
 * A:   cur_old_roots < nr_old_roots    (not exclusive before)
 * !A:  cur_old_roots == nr_old_roots   (possible exclusive before)
 * B:   cur_new_roots < nr_new_roots    (not exclusive now)
 * !B:  cur_new_roots == nr_new_roots   (possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive     -: Possible exclusive -> sharing
 * *: Definitely not changed.           **: Possibly unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
 * case.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combinations into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * them only one variant may be 0.
 *
 * Lastly, check result **: since there are 2 variants that may be 0, split
 * them into all the different combinations the long way.
 * But this time we don't need to consider other things; the code and logic
 * are easy to understand now.
 */
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
                                  struct ulist *qgroups,
                                  u64 nr_old_roots,
                                  u64 nr_new_roots,
                                  u64 num_bytes, u64 seq)
{
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct btrfs_qgroup *qg;
        u64 cur_new_count, cur_old_count;

        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(qgroups, &uiter))) {
                bool dirty = false;

                qg = u64_to_ptr(unode->aux);
                cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
                cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

                trace_qgroup_update_counters(fs_info, qg->qgroupid,
                                             cur_old_count, cur_new_count);

                /* Rfer update part */
                if (cur_old_count == 0 && cur_new_count > 0) {
                        qg->rfer += num_bytes;
                        qg->rfer_cmpr += num_bytes;
                        dirty = true;
                }
                if (cur_old_count > 0 && cur_new_count == 0) {
                        qg->rfer -= num_bytes;
                        qg->rfer_cmpr -= num_bytes;
                        dirty = true;
                }

                /* Excl update part */
                /* Exclusive/none -> shared case */
                if (cur_old_count == nr_old_roots &&
                    cur_new_count < nr_new_roots) {
                        /* Exclusive -> shared */
                        if (cur_old_count != 0) {
                                qg->excl -= num_bytes;
                                qg->excl_cmpr -= num_bytes;
                                dirty = true;
                        }
                }

                /* Shared -> exclusive/none case */
                if (cur_old_count < nr_old_roots &&
                    cur_new_count == nr_new_roots) {
                        /* Shared->exclusive */
                        if (cur_new_count != 0) {
                                qg->excl += num_bytes;
                                qg->excl_cmpr += num_bytes;
                                dirty = true;
                        }
                }

                /* Exclusive/none -> exclusive/none case */
                if (cur_old_count == nr_old_roots &&
                    cur_new_count == nr_new_roots) {
                        if (cur_old_count == 0) {
                                /* None -> exclusive/none */

                                if (cur_new_count != 0) {
                                        /* None -> exclusive */
                                        qg->excl += num_bytes;
                                        qg->excl_cmpr += num_bytes;
                                        dirty = true;
                                }
                                /* None -> none, nothing changed */
                        } else {
                                /* Exclusive -> exclusive/none */

                                if (cur_new_count == 0) {
                                        /* Exclusive -> none */
                                        qg->excl -= num_bytes;
                                        qg->excl_cmpr -= num_bytes;
                                        dirty = true;
                                }
                                /* Exclusive -> exclusive, nothing changed */
                        }
                }

                if (dirty)
                        qgroup_dirty(fs_info, qg);
        }
        return 0;
}
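
/*
 * Worked example (added for clarity): take an extent of num_bytes that was
 * referenced by roots {A} and becomes referenced by {A, B}, so
 * nr_old_roots = 1, nr_new_roots = 2. For qgroup A: cur_old_count = 1 and
 * cur_new_count = 1, so rfer is untouched (both counts non-zero) and the
 * "exclusive -> shared" branch fires (1 == nr_old_roots, 1 < nr_new_roots),
 * shrinking A->excl by num_bytes. For qgroup B: cur_old_count = 0 and
 * cur_new_count = 1, so B->rfer grows by num_bytes and no exclusive branch
 * applies (1 < 2).
 */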
int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info,
                            u64 bytenr, u64 num_bytes,
                            struct ulist *old_roots, struct ulist *new_roots)
{
        struct ulist *qgroups = NULL;
        struct ulist *tmp = NULL;
        u64 seq;
        u64 nr_new_roots = 0;
        u64 nr_old_roots = 0;
        int ret = 0;

        if (new_roots)
                nr_new_roots = new_roots->nnodes;
        if (old_roots)
                nr_old_roots = old_roots->nnodes;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                goto out_free;
        BUG_ON(!fs_info->quota_root);

        trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
                                          nr_old_roots, nr_new_roots);

        qgroups = ulist_alloc(GFP_NOFS);
        if (!qgroups) {
                ret = -ENOMEM;
                goto out_free;
        }
        tmp = ulist_alloc(GFP_NOFS);
        if (!tmp) {
                ret = -ENOMEM;
                goto out_free;
        }

        mutex_lock(&fs_info->qgroup_rescan_lock);
        if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
                if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
                        mutex_unlock(&fs_info->qgroup_rescan_lock);
                        ret = 0;
                        goto out_free;
                }
        }
        mutex_unlock(&fs_info->qgroup_rescan_lock);

        spin_lock(&fs_info->qgroup_lock);
        seq = fs_info->qgroup_seq;

        /* Update old refcnts using old_roots */
        ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
                                   UPDATE_OLD);
        if (ret < 0)
                goto out;

        /* Update new refcnts using new_roots */
        ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
                                   UPDATE_NEW);
        if (ret < 0)
                goto out;

        qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
                               num_bytes, seq);

        /*
         * Bump qgroup_seq to avoid seq overlap
         */
        fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
        spin_unlock(&fs_info->qgroup_lock);
out_free:
        ulist_free(tmp);
        ulist_free(qgroups);
        ulist_free(old_roots);
        ulist_free(new_roots);
        return ret;
}
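
/*
 * Note on the seq bump (added for clarity): each accounted extent consumes
 * the refcnt range [seq, seq + max(nr_old_roots, nr_new_roots)], since every
 * involved qgroup's old/new_refcnt starts at seq and is incremented at most
 * once per root. Advancing qgroup_seq past that range guarantees the next
 * extent's round cannot misread leftover counters as its own.
 */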
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
                                 struct btrfs_fs_info *fs_info)
{
        struct btrfs_qgroup_extent_record *record;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct ulist *new_roots = NULL;
        struct rb_node *node;
        u64 qgroup_to_skip;
        int ret = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        qgroup_to_skip = delayed_refs->qgroup_to_skip;
        while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
                record = rb_entry(node, struct btrfs_qgroup_extent_record,
                                  node);

                trace_btrfs_qgroup_account_extents(fs_info, record);

                if (!ret) {
                        /*
                         * Use (u64)-1 as time_seq to do special search, which
                         * doesn't lock tree or delayed_refs and search current
                         * root. It's safe inside commit_transaction().
                         */
                        ret = btrfs_find_all_roots(trans, fs_info,
                                        record->bytenr, (u64)-1, &new_roots);
                        if (ret < 0)
                                goto cleanup;
                        if (qgroup_to_skip)
                                ulist_del(new_roots, qgroup_to_skip, 0);
                        ret = btrfs_qgroup_account_extent(trans, fs_info,
                                        record->bytenr, record->num_bytes,
                                        record->old_roots, new_roots);
                        record->old_roots = NULL;
                        new_roots = NULL;
                }
cleanup:
                ulist_free(record->old_roots);
                ulist_free(new_roots);
                new_roots = NULL;
                rb_erase(node, &delayed_refs->dirty_extent_root);
                kfree(record);
        }
        return ret;
}
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
                      struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root = fs_info->quota_root;
        int ret = 0;
        int start_rescan_worker = 0;

        if (!quota_root)
                goto out;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
                start_rescan_worker = 1;

        if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
                set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
                clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);

        spin_lock(&fs_info->qgroup_lock);
        while (!list_empty(&fs_info->dirty_qgroups)) {
                struct btrfs_qgroup *qgroup;
                qgroup = list_first_entry(&fs_info->dirty_qgroups,
                                          struct btrfs_qgroup, dirty);
                list_del_init(&qgroup->dirty);
                spin_unlock(&fs_info->qgroup_lock);
                ret = update_qgroup_info_item(trans, quota_root, qgroup);
                if (ret)
                        fs_info->qgroup_flags |=
                                        BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                ret = update_qgroup_limit_item(trans, quota_root, qgroup);
                if (ret)
                        fs_info->qgroup_flags |=
                                        BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                spin_lock(&fs_info->qgroup_lock);
        }
        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
        else
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_status_item(trans, fs_info, quota_root);
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

        if (!ret && start_rescan_worker) {
                ret = qgroup_rescan_init(fs_info, 0, 1);
                if (!ret) {
                        qgroup_rescan_zero_tracking(fs_info);
                        btrfs_queue_work(fs_info->qgroup_rescan_workers,
                                         &fs_info->qgroup_rescan_work);
                }
                ret = 0;
        }

out:
        return ret;
}
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
                         struct btrfs_qgroup_inherit *inherit)
{
        int ret = 0;
        int i;
        u64 *i_qgroups;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_qgroup *srcgroup;
        struct btrfs_qgroup *dstgroup;
        u32 level_size = 0;
        u64 nums;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                goto out;

        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        if (inherit) {
                i_qgroups = (u64 *)(inherit + 1);
                nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
                       2 * inherit->num_excl_copies;
                for (i = 0; i < nums; ++i) {
                        srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

                        /*
                         * Zero out invalid groups so we can ignore
                         * them later.
                         */
                        if (!srcgroup ||
                            ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
                                *i_qgroups = 0ULL;

                        ++i_qgroups;
                }
        }

        /*
         * create a tracking group for the subvol itself
         */
        ret = add_qgroup_item(trans, quota_root, objectid);
        if (ret)
                goto out;

        if (srcid) {
                struct btrfs_root *srcroot;
                struct btrfs_key srckey;

                srckey.objectid = srcid;
                srckey.type = BTRFS_ROOT_ITEM_KEY;
                srckey.offset = (u64)-1;
                srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
                if (IS_ERR(srcroot)) {
                        ret = PTR_ERR(srcroot);
                        goto out;
                }

                rcu_read_lock();
                level_size = srcroot->nodesize;
                rcu_read_unlock();
        }

        /*
         * add qgroup to all inherited groups
         */
        if (inherit) {
                i_qgroups = (u64 *)(inherit + 1);
                for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
                        if (*i_qgroups == 0)
                                continue;
                        ret = add_qgroup_relation_item(trans, quota_root,
                                                       objectid, *i_qgroups);
                        if (ret && ret != -EEXIST)
                                goto out;
                        ret = add_qgroup_relation_item(trans, quota_root,
                                                       *i_qgroups, objectid);
                        if (ret && ret != -EEXIST)
                                goto out;
                }
                ret = 0;
        }


        spin_lock(&fs_info->qgroup_lock);

        dstgroup = add_qgroup_rb(fs_info, objectid);
        if (IS_ERR(dstgroup)) {
                ret = PTR_ERR(dstgroup);
                goto unlock;
        }

        if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
                dstgroup->lim_flags = inherit->lim.flags;
                dstgroup->max_rfer = inherit->lim.max_rfer;
                dstgroup->max_excl = inherit->lim.max_excl;
                dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
                dstgroup->rsv_excl = inherit->lim.rsv_excl;

                ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
                if (ret) {
                        fs_info->qgroup_flags |=
                                        BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                        btrfs_info(fs_info,
                                   "unable to update quota limit for %llu",
                                   dstgroup->qgroupid);
                        goto unlock;
                }
        }

        if (srcid) {
                srcgroup = find_qgroup_rb(fs_info, srcid);
                if (!srcgroup)
                        goto unlock;

                /*
                 * We call inherit after we clone the root in order to make sure
                 * our counts don't go crazy, so at this point the only
                 * difference between the two roots should be the root node.
                 */
                dstgroup->rfer = srcgroup->rfer;
                dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
                dstgroup->excl = level_size;
                dstgroup->excl_cmpr = level_size;
                srcgroup->excl = level_size;
                srcgroup->excl_cmpr = level_size;

                /* inherit the limit info */
                dstgroup->lim_flags = srcgroup->lim_flags;
                dstgroup->max_rfer = srcgroup->max_rfer;
                dstgroup->max_excl = srcgroup->max_excl;
                dstgroup->rsv_rfer = srcgroup->rsv_rfer;
                dstgroup->rsv_excl = srcgroup->rsv_excl;

                qgroup_dirty(fs_info, dstgroup);
                qgroup_dirty(fs_info, srcgroup);
        }

        if (!inherit)
                goto unlock;

        i_qgroups = (u64 *)(inherit + 1);
        for (i = 0; i < inherit->num_qgroups; ++i) {
                if (*i_qgroups) {
                        ret = add_relation_rb(quota_root->fs_info, objectid,
                                              *i_qgroups);
                        if (ret)
                                goto unlock;
                }
                ++i_qgroups;
        }

        for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
                struct btrfs_qgroup *src;
                struct btrfs_qgroup *dst;

                if (!i_qgroups[0] || !i_qgroups[1])
                        continue;

                src = find_qgroup_rb(fs_info, i_qgroups[0]);
                dst = find_qgroup_rb(fs_info, i_qgroups[1]);

                if (!src || !dst) {
                        ret = -EINVAL;
                        goto unlock;
                }

                dst->rfer = src->rfer - level_size;
                dst->rfer_cmpr = src->rfer_cmpr - level_size;
        }
        for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
                struct btrfs_qgroup *src;
                struct btrfs_qgroup *dst;

                if (!i_qgroups[0] || !i_qgroups[1])
                        continue;

                src = find_qgroup_rb(fs_info, i_qgroups[0]);
                dst = find_qgroup_rb(fs_info, i_qgroups[1]);

                if (!src || !dst) {
                        ret = -EINVAL;
                        goto unlock;
                }

                dst->excl = src->excl + level_size;
                dst->excl_cmpr = src->excl_cmpr + level_size;
        }

unlock:
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
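
/*
 * Note on level_size (added for clarity): right after a snapshot is created,
 * source and destination share every block except the root node the clone
 * allocated, so both subvolumes' exclusive counts collapse to exactly one
 * tree node (srcroot->nodesize bytes), while the snapshot inherits the full
 * referenced count of its source.
 */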
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 ref_root = root->root_key.objectid;
        int ret = 0;
        struct ulist_node *unode;
        struct ulist_iterator uiter;

        if (!is_fstree(ref_root))
                return 0;

        if (num_bytes == 0)
                return 0;

        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root)
                goto out;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        /*
         * in a first step, we check all affected qgroups if any limits would
         * be exceeded
         */
        ulist_reinit(fs_info->qgroup_ulist);
        ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
                        (uintptr_t)qgroup, GFP_ATOMIC);
        if (ret < 0)
                goto out;
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
                struct btrfs_qgroup *qg;
                struct btrfs_qgroup_list *glist;

                qg = u64_to_ptr(unode->aux);

                if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
                    qg->reserved + (s64)qg->rfer + num_bytes >
                    qg->max_rfer) {
                        ret = -EDQUOT;
                        goto out;
                }

                if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
                    qg->reserved + (s64)qg->excl + num_bytes >
                    qg->max_excl) {
                        ret = -EDQUOT;
                        goto out;
                }

                list_for_each_entry(glist, &qg->groups, next_group) {
                        ret = ulist_add(fs_info->qgroup_ulist,
                                        glist->group->qgroupid,
                                        (uintptr_t)glist->group, GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
        /*
         * no limits exceeded, now record the reservation into all qgroups
         */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
                struct btrfs_qgroup *qg;

                qg = u64_to_ptr(unode->aux);

                qg->reserved += num_bytes;
        }

out:
        spin_unlock(&fs_info->qgroup_lock);
        return ret;
}
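
/*
 * Illustrative check (added for clarity, values hypothetical): with
 * max_rfer = 1MiB, rfer = 768KiB and reserved = 128KiB, a qgroup_reserve()
 * of 256KiB fails with -EDQUOT because 128KiB + 768KiB + 256KiB exceeds the
 * 1MiB limit. Reservations are charged against committed usage plus
 * in-flight reservations on every ancestor, not just the subvolume's own
 * qgroup.
 */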
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
                               u64 ref_root, u64 num_bytes)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        int ret = 0;

        if (!is_fstree(ref_root))
                return;

        if (num_bytes == 0)
                return;

        spin_lock(&fs_info->qgroup_lock);

        quota_root = fs_info->quota_root;
        if (!quota_root)
                goto out;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        ulist_reinit(fs_info->qgroup_ulist);
        ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
                        (uintptr_t)qgroup, GFP_ATOMIC);
        if (ret < 0)
                goto out;
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
                struct btrfs_qgroup *qg;
                struct btrfs_qgroup_list *glist;

                qg = u64_to_ptr(unode->aux);

                qg->reserved -= num_bytes;

                list_for_each_entry(glist, &qg->groups, next_group) {
                        ret = ulist_add(fs_info->qgroup_ulist,
                                        glist->group->qgroupid,
                                        (uintptr_t)glist->group, GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }

out:
        spin_unlock(&fs_info->qgroup_lock);
}
static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
        return btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
                                         num_bytes);
}
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
        if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
                return;
        btrfs_err(trans->fs_info,
                "qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x",
                trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
                (u32)(trans->delayed_ref_elem.seq >> 32),
                (u32)trans->delayed_ref_elem.seq);
        BUG();
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
                   struct btrfs_trans_handle *trans)
{
        struct btrfs_key found;
        struct extent_buffer *scratch_leaf = NULL;
        struct ulist *roots = NULL;
        struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
        u64 num_bytes;
        int slot;
        int ret;

        mutex_lock(&fs_info->qgroup_rescan_lock);
        ret = btrfs_search_slot_for_read(fs_info->extent_root,
                                         &fs_info->qgroup_rescan_progress,
                                         path, 1, 0);

        pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
                 fs_info->qgroup_rescan_progress.objectid,
                 fs_info->qgroup_rescan_progress.type,
                 fs_info->qgroup_rescan_progress.offset, ret);

        if (ret) {
                /*
                 * The rescan is about to end, we will not be scanning any
                 * further blocks. We cannot unset the RESCAN flag here, because
                 * we want to commit the transaction if everything went well.
                 * To make the live accounting work in this phase, we set our
                 * scan progress pointer such that every real extent objectid
                 * will be smaller.
                 */
                fs_info->qgroup_rescan_progress.objectid = (u64)-1;
                btrfs_release_path(path);
                mutex_unlock(&fs_info->qgroup_rescan_lock);
                return ret;
        }

        btrfs_item_key_to_cpu(path->nodes[0], &found,
                              btrfs_header_nritems(path->nodes[0]) - 1);
        fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

        btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
        scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
        if (!scratch_leaf) {
                ret = -ENOMEM;
                mutex_unlock(&fs_info->qgroup_rescan_lock);
                goto out;
        }
        extent_buffer_get(scratch_leaf);
        btrfs_tree_read_lock(scratch_leaf);
        btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
        slot = path->slots[0];
        btrfs_release_path(path);
        mutex_unlock(&fs_info->qgroup_rescan_lock);

        for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
                btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
                if (found.type != BTRFS_EXTENT_ITEM_KEY &&
                    found.type != BTRFS_METADATA_ITEM_KEY)
                        continue;
                if (found.type == BTRFS_METADATA_ITEM_KEY)
                        num_bytes = fs_info->extent_root->nodesize;
                else
                        num_bytes = found.offset;

                ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
                                           &roots);
                if (ret < 0)
                        goto out;
                /* For rescan, just pass old_roots as NULL */
                ret = btrfs_qgroup_account_extent(trans, fs_info,
                                found.objectid, num_bytes, NULL, roots);
                if (ret < 0)
                        goto out;
        }
out:
        if (scratch_leaf) {
                btrfs_tree_read_unlock_blocking(scratch_leaf);
                free_extent_buffer(scratch_leaf);
        }
        btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

        return ret;
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
                                                     qgroup_rescan_work);
        struct btrfs_path *path;
        struct btrfs_trans_handle *trans = NULL;
        int err = -ENOMEM;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_rescan_lock);
        fs_info->qgroup_rescan_running = true;
        mutex_unlock(&fs_info->qgroup_rescan_lock);

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        err = 0;
        while (!err && !btrfs_fs_closing(fs_info)) {
                trans = btrfs_start_transaction(fs_info->fs_root, 0);
                if (IS_ERR(trans)) {
                        err = PTR_ERR(trans);
                        break;
                }
                if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
                        err = -EINTR;
                } else {
                        err = qgroup_rescan_leaf(fs_info, path, trans);
                }
                if (err > 0)
                        btrfs_commit_transaction(trans, fs_info->fs_root);
                else
                        btrfs_end_transaction(trans, fs_info->fs_root);
        }

out:
        btrfs_free_path(path);

        mutex_lock(&fs_info->qgroup_rescan_lock);
        if (!btrfs_fs_closing(fs_info))
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

        if (err > 0 &&
            fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        } else if (err < 0) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        }
        mutex_unlock(&fs_info->qgroup_rescan_lock);

        /*
         * only update status, since the previous part has already updated the
         * qgroup info.
         */
        trans = btrfs_start_transaction(fs_info->quota_root, 1);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                btrfs_err(fs_info,
                          "fail to start transaction for status update: %d",
                          err);
                goto done;
        }
        ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
        if (ret < 0) {
                err = ret;
                btrfs_err(fs_info, "fail to update qgroup status: %d", err);
        }
        btrfs_end_transaction(trans, fs_info->quota_root);

        if (btrfs_fs_closing(fs_info)) {
                btrfs_info(fs_info, "qgroup scan paused");
        } else if (err >= 0) {
                btrfs_info(fs_info, "qgroup scan completed%s",
                        err > 0 ? " (inconsistency flag cleared)" : "");
        } else {
                btrfs_err(fs_info, "qgroup scan failed with %d", err);
        }

done:
        mutex_lock(&fs_info->qgroup_rescan_lock);
        fs_info->qgroup_rescan_running = false;
        mutex_unlock(&fs_info->qgroup_rescan_lock);
        complete_all(&fs_info->qgroup_rescan_completion);
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags)
{
        int ret = 0;

        if (!init_flags &&
            (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
             !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
                ret = -EINVAL;
                goto err;
        }

        mutex_lock(&fs_info->qgroup_rescan_lock);
        spin_lock(&fs_info->qgroup_lock);

        if (init_flags) {
                if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
                        ret = -EINPROGRESS;
                else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
                        ret = -EINVAL;

                if (ret) {
                        spin_unlock(&fs_info->qgroup_lock);
                        mutex_unlock(&fs_info->qgroup_rescan_lock);
                        goto err;
                }
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        memset(&fs_info->qgroup_rescan_progress, 0,
                sizeof(fs_info->qgroup_rescan_progress));
        fs_info->qgroup_rescan_progress.objectid = progress_objectid;
        init_completion(&fs_info->qgroup_rescan_completion);

        spin_unlock(&fs_info->qgroup_lock);
        mutex_unlock(&fs_info->qgroup_rescan_lock);

        memset(&fs_info->qgroup_rescan_work, 0,
               sizeof(fs_info->qgroup_rescan_work));
        btrfs_init_work(&fs_info->qgroup_rescan_work,
                        btrfs_qgroup_rescan_helper,
                        btrfs_qgroup_rescan_worker, NULL, NULL);

        if (ret) {
err:
                btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
                return ret;
        }

        return 0;
}
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        spin_lock(&fs_info->qgroup_lock);
        /* clear all current qgroup tracking information */
        for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                qgroup->rfer = 0;
                qgroup->rfer_cmpr = 0;
                qgroup->excl = 0;
                qgroup->excl_cmpr = 0;
        }
        spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_rescan_running;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * This is only called from open_ctree() where we're still single threaded,
 * thus locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}
/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do
 * nothing if the range is already reserved.
 *
 * Return 0 for successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: this function may sleep for memory allocation.
 */
int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->objectid) || len == 0)
		return 0;

	changeset.bytes_changed = 0;
	changeset.range_changed = ulist_alloc(GFP_NOFS);
	if (!changeset.range_changed)
		return -ENOMEM;

	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
	trace_btrfs_qgroup_reserve_data(inode, start, len,
					changeset.bytes_changed,
					QGROUP_RESERVE);
	if (ret < 0)
		goto cleanup;
	ret = qgroup_reserve(root, changeset.bytes_changed);
	if (ret < 0)
		goto cleanup;

	ulist_free(changeset.range_changed);
	return ret;

cleanup:
	/* cleanup already reserved ranges */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(changeset.range_changed, &uiter)))
		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
				 GFP_NOFS);
	ulist_free(changeset.range_changed);
	return ret;
}
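/*
 * Editor's illustrative sketch (not part of the original file): because only
 * ranges whose EXTENT_QGROUP_RESERVED bit actually changed are counted in
 * changeset.bytes_changed, overlapping reservations are charged only once:
 *
 *	btrfs_qgroup_reserve_data(inode, 0, 8192);	(charges 8192 bytes)
 *	btrfs_qgroup_reserve_data(inode, 4096, 8192);	(charges only the new 4096)
 */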
static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
				       int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	changeset.bytes_changed = 0;
	changeset.range_changed = ulist_alloc(GFP_NOFS);
	if (!changeset.range_changed)
		return -ENOMEM;

	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free) {
		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
		trace_op = QGROUP_FREE;
	}
	trace_btrfs_qgroup_release_data(inode, start, len,
					changeset.bytes_changed, trace_op);
out:
	ulist_free(changeset.range_changed);
	return ret;
}
/*
 * Free a reserved space range from io_tree and related qgroups.
 *
 * Should be called when a range of pages gets invalidated before reaching
 * disk, or for the error cleanup case.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, start, len, 1);
}
/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages gets written to disk and the
 * corresponding FILE_EXTENT item is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework only updates qgroup numbers at
 * commit_transaction() time, the reserved space shouldn't be freed from the
 * related qgroups yet.
 *
 * But we should release the range from io_tree, to allow further writes to
 * be COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, start, len, 0);
}
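/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * data reservation lifecycle in a hypothetical write helper.  Reserve before
 * dirtying pages; release once the range is safely on disk (the qgroup
 * numbers are updated at commit time), or free it back on error or page
 * invalidation:
 *
 *	ret = btrfs_qgroup_reserve_data(inode, start, len);
 *	if (ret < 0)
 *		return ret;			(-EDQUOT when over quota)
 *	...
 *	if (written_to_disk)
 *		btrfs_qgroup_release_data(inode, start, len);
 *	else
 *		btrfs_qgroup_free_data(inode, start, len);
 */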
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
	ret = qgroup_reserve(root, num_bytes);
	if (ret < 0)
		return ret;
	atomic_add(num_bytes, &root->qgroup_meta_rsv);
	return ret;
}
void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
	int reserved;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->objectid))
		return;

	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
	if (reserved == 0)
		return;
	qgroup_free(root, reserved);
}
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->objectid))
		return;

	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
	qgroup_free(root, num_bytes);
}
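/*
 * Editor's illustrative sketch (not part of the original file): metadata
 * reservations are tracked per root in qgroup_meta_rsv and must stay
 * nodesize-aligned (see the BUG_ON above).  A caller reserving space for a
 * pending tree operation pairs the calls, with "nr_items" as a hypothetical
 * item count:
 *
 *	ret = btrfs_qgroup_reserve_meta(root, nr_items * root->nodesize);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	btrfs_qgroup_free_meta(root, nr_items * root->nodesize);
 */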
/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	changeset.bytes_changed = 0;
	changeset.range_changed = ulist_alloc(GFP_NOFS);
	if (WARN_ON(!changeset.range_changed))
		return;

	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
			EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(changeset.range_changed, &iter))) {
			btrfs_warn(BTRFS_I(inode)->root->fs_info,
				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
				inode->i_ino, unode->val, unode->aux);
		}
		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
	}
	ulist_free(changeset.range_changed);
}
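/*
 * Editor's illustrative note (not part of the original file): this check is
 * intended to run when an inode is torn down and no further I/O can touch
 * its io_tree, e.g. from the inode destruction path:
 *
 *	btrfs_qgroup_check_reserved_leak(inode);
 */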