Btrfs: add hole punching
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1bb408f737fb2e19541046bf900058e338939442..ee51b7afe97d1265d4ad4c1693e41d01dda5426c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2251,6 +2251,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                        }
                }
 
+               /*
+                * We need to try and merge add/drops of the same ref since we
+                * can run into issues with relocate dropping the implicit ref
+                * and then it being added back again before the drop can
+                * finish.  If we merged anything we need to re-loop so we can
+                * get a good ref.
+                */
+               btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
+                                        locked_ref);
+
                /*
                 * locked_ref is the head node, so we have to go one
                 * node back for any delayed ref updates
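The new btrfs_merge_delayed_refs() call cancels matching add/drop pairs queued against the locked head before any of them are replayed, so relocation dropping an implicit backref and immediately re-adding it collapses to a no-op instead of the drop being run against a stale state. A standalone userspace sketch of the cancellation idea (illustration only, not the real btrfs_merge_delayed_refs(), and it ignores the bytenr/root matching the real code does):

#include <stdio.h>

enum action { ADD, DROP };

struct queued_ref {
	enum action action;
	int ref_mod;	/* how many references this entry adds/drops */
	int merged;	/* set once the entry has been cancelled out */
};

static void merge_refs(struct queued_ref *q, int n)
{
	for (int i = 0; i < n; i++) {
		for (int j = i + 1; j < n && !q[i].merged; j++) {
			if (q[j].merged || q[j].action == q[i].action)
				continue;
			/* opposite actions on the same ref cancel */
			int m = q[i].ref_mod < q[j].ref_mod ?
				q[i].ref_mod : q[j].ref_mod;
			q[i].ref_mod -= m;
			q[j].ref_mod -= m;
			q[i].merged = !q[i].ref_mod;
			q[j].merged = !q[j].ref_mod;
		}
	}
}

int main(void)
{
	/* relocate drops the implicit ref, then it is added back */
	struct queued_ref q[] = { { DROP, 1, 0 }, { ADD, 1, 0 } };

	merge_refs(q, 2);
	for (int i = 0; i < 2; i++)
		printf("entry %d: %s\n", i,
		       q[i].merged ? "merged away" : "still queued");
	return 0;
}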
@@ -2318,6 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
+               if (locked_ref) {
+                       /*
+                        * when we play the delayed ref, also correct the
+                        * ref_mod on head
+                        */
+                       switch (ref->action) {
+                       case BTRFS_ADD_DELAYED_REF:
+                       case BTRFS_ADD_DELAYED_EXTENT:
+                               locked_ref->node.ref_mod -= ref->ref_mod;
+                               break;
+                       case BTRFS_DROP_DELAYED_REF:
+                               locked_ref->node.ref_mod += ref->ref_mod;
+                               break;
+                       default:
+                               WARN_ON(1);
+                       }
+               }
                spin_unlock(&delayed_refs->lock);
 
                ret = run_one_delayed_ref(trans, root, ref, extent_op,
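The head node's ref_mod is the net count of everything still queued for the extent, so replaying an individual ADD lowers it by that ref's ref_mod and replaying a DROP raises it, keeping the head consistent for anything that inspects it mid-run. A tiny worked example of the invariant, with hypothetical numbers:

#include <assert.h>

int main(void)
{
	/* head ref_mod == net of the queue: one ADD of 2, one DROP of 1 */
	int head_ref_mod = 1;

	head_ref_mod -= 2;	/* replay the ADD (ref->ref_mod == 2) */
	head_ref_mod += 1;	/* replay the DROP (ref->ref_mod == 1) */

	assert(head_ref_mod == 0);	/* queue drained, nothing pending */
	return 0;
}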
@@ -3477,7 +3504,8 @@ static int should_alloc_chunk(struct btrfs_root *root,
         * and purposes it's used space.  Don't worry about locking the
         * global_rsv, it doesn't change except when the transaction commits.
         */
-       num_allocated += global_rsv->size;
+       if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
+               num_allocated += global_rsv->size;
 
        /*
         * in limited mode, we want to have some free space up to
@@ -3491,15 +3519,8 @@ static int should_alloc_chunk(struct btrfs_root *root,
                if (num_bytes - num_allocated < thresh)
                        return 1;
        }
-       thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
 
-       /* 256MB or 2% of the FS */
-       thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
-       /* system chunks need a much small threshold */
-       if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
-               thresh = 32 * 1024 * 1024;
-
-       if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
+       if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
                return 0;
        return 1;
 }
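Two behavioural changes here: the global reserve is only counted as allocated space for metadata space_infos, and the old absolute threshold (256MB or 2% of the filesystem, 32MB for system chunks) is replaced by a single proportional rule: refuse a new chunk only while the allocated bytes plus the requested bytes stay below 80% of what this space_info already has (btrfs's div_factor(num, 8) is num * 8 / 10). A worked example with hypothetical sizes:

#include <stdio.h>
#include <stdint.h>

/* same math as btrfs's div_factor(): factor is in tenths */
static uint64_t div_factor(uint64_t num, int factor)
{
	return num * factor / 10;
}

int main(void)
{
	/* hypothetical metadata space_info */
	uint64_t num_bytes     = 10ULL << 30;	/* 10 GiB of chunks provisioned */
	uint64_t num_allocated = 6ULL << 30;	/* used + reserved (+ global rsv) */
	uint64_t alloc_bytes   = 1ULL << 30;	/* size of the requested chunk */

	int alloc = !(num_allocated + alloc_bytes < div_factor(num_bytes, 8));

	printf("80%% threshold: %llu bytes, allocate new chunk: %s\n",
	       (unsigned long long)div_factor(num_bytes, 8),
	       alloc ? "yes" : "no");	/* 7 GiB < 8 GiB -> no */
	return 0;
}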
@@ -4111,6 +4132,8 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
 void btrfs_free_block_rsv(struct btrfs_root *root,
                          struct btrfs_block_rsv *rsv)
 {
+       if (!rsv)
+               return;
        btrfs_block_rsv_release(root, rsv, (u64)-1);
        kfree(rsv);
 }
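Making btrfs_free_block_rsv() tolerate a NULL reservation lets callers free it unconditionally from a shared cleanup path, the same way kfree(NULL) is a no-op. A hypothetical caller sketch (kernel context, illustration only):

static int example_user(struct btrfs_root *root)
{
	struct btrfs_block_rsv *rsv;
	int ret = -ENOMEM;

	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv)
		goto out;

	/* ... work that may fail and jump to out ... */
	ret = 0;
out:
	btrfs_free_block_rsv(root, rsv);	/* safe even when rsv == NULL */
	return ret;
}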
@@ -6287,7 +6310,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        ret = block_rsv_use_bytes(block_rsv, blocksize);
        if (!ret)
                return block_rsv;
-       if (ret) {
+       if (ret && !block_rsv->failfast) {
                static DEFINE_RATELIMIT_STATE(_rs,
                                DEFAULT_RATELIMIT_INTERVAL,
                                /*DEFAULT_RATELIMIT_BURST*/ 2);
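With the reservation marked failfast, a failed block_rsv_use_bytes() no longer goes through the ratelimited warning path: the caller is deliberately working from a small reservation, expects it to run dry, and handles the failure itself by refilling and retrying. A schematic of that caller pattern (hypothetical helper names, not the actual hole-punching code):

/* rsv->failfast tells use_block_rsv() that running dry is expected */
rsv->failfast = 1;
do {
	ret = do_one_batch_of_tree_mods(trans, root, rsv);	/* hypothetical */
	if (ret != -ENOSPC)
		break;
	/* expected failure: top the reservation back up and retry */
	ret = refill_block_rsv(root, rsv);			/* hypothetical */
} while (!ret);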