/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                      struct btrfs_delayed_ref_node *ref1,
                      bool compare_seq)
{
        if (ref1->bytenr < ref2->bytenr)
                return -1;
        if (ref1->bytenr > ref2->bytenr)
                return 1;
        if (ref1->is_head && ref2->is_head)
                return 0;
        if (ref2->is_head)
                return -1;
        if (ref1->is_head)
                return 1;
        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        /* merging of sequenced refs is not allowed */
        if (compare_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
                                      btrfs_delayed_node_to_tree_ref(ref1),
                                      ref1->type);
        } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
                   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
                return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
                                      btrfs_delayed_node_to_data_ref(ref1));
        }
        BUG();
        return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                                                  struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        struct btrfs_delayed_ref_node *ins;
        int cmp;

        ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 rb_node);

                cmp = comp_entry(entry, ins, 1);
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

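/*
 * Lock the mutex of a delayed ref head.  The caller must already hold
 * delayed_refs->lock; if the mutex is contended the spinlock is dropped
 * while we sleep on the mutex and taken again afterwards.  Returns 0 with
 * the mutex held, or -EAGAIN if the head was removed from the tree while
 * we slept and the operation needs to be retried.
 */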
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

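/*
 * Unlink a delayed ref from its rbtree (the per-transaction head tree for
 * head refs, the per-head ref_root otherwise), drop the reference held on
 * it and update the bookkeeping counters.
 */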
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                rb_erase(&ref->rb_node, &head->ref_root);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

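/*
 * Starting after @ref, fold matching refs (same extent and backref, seq
 * ignored) into it: identical actions add their ref_mod, opposite actions
 * cancel each other out.  Refs at or beyond @seq are left alone.  Returns 1
 * if @ref itself was dropped or swapped out and the caller must restart the
 * scan of the head's tree, 0 otherwise.
 */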
static int merge_ref(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_root *delayed_refs,
                     struct btrfs_delayed_ref_head *head,
                     struct btrfs_delayed_ref_node *ref, u64 seq)
{
        struct rb_node *node;
        int mod = 0;
        int done = 0;

        node = rb_next(&ref->rb_node);
        while (!done && node) {
                struct btrfs_delayed_ref_node *next;

                next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_entry(ref, next, 0))
                        continue;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                struct btrfs_delayed_ref_node *tmp;

                                tmp = ref;
                                ref = next;
                                next = tmp;
                                done = 1;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = 1;
                } else {
                        /*
                         * You can't have multiples of the same ref on a tree
                         * block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }
        return done;
}

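/*
 * Merge the delayed refs queued on @head so that run_delayed_refs has fewer
 * entries to process.  Must be called with head->lock held.  Data refs are
 * skipped because there are usually too few of them to be worth merging,
 * and refs newer than the oldest entry on the tree mod seq list are never
 * touched.
 */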
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct rb_node *node;
        u64 seq = 0;

        assert_spin_locked(&head->lock);
        /*
         * We don't have too many refs to merge in the case of delayed data
         * refs.
         */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        node = rb_first(&head->ref_root);
        while (node) {
                struct btrfs_delayed_ref_node *ref;

                ref = rb_entry(node, struct btrfs_delayed_ref_node,
                               rb_node);
                /* We can't merge refs that are outside of our seq count */
                if (seq && ref->seq >= seq)
                        break;
                if (merge_ref(trans, delayed_refs, head, ref, seq))
                        node = rb_first(&head->ref_root);
                else
                        node = rb_next(&ref->rb_node);
        }
}

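/*
 * Returns 1 if the delayed ref with sequence number @seq must be postponed
 * because an entry at or below that sequence number is still registered on
 * fs_info->tree_mod_seq_list, 0 if it is safe to process the ref now.
 */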
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
                                 (u32)(seq >> 32), (u32)seq,
                                 (u32)(elem->seq >> 32), (u32)elem->seq,
                                 delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

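/*
 * Return the next delayed ref head to process and mark it as being
 * processed.  The search starts at run_delayed_start, wraps around to the
 * beginning of the tree once, and skips heads that another task is already
 * working on.  Returns NULL when no head is available.
 */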
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
                    struct btrfs_delayed_ref_root *delayed_refs,
                    struct btrfs_delayed_ref_head *head,
                    struct btrfs_delayed_ref_node *existing,
                    struct btrfs_delayed_ref_node *update)
{
        if (update->action != existing->action) {
                /*
                 * this is effectively undoing either an add or a
                 * drop.  We decrement the ref_mod, and if it goes
                 * down to zero we just delete the entry without
                 * ever changing the extent allocation tree.
                 */
                existing->ref_mod--;
                if (existing->ref_mod == 0)
                        drop_delayed_ref(trans, delayed_refs, head, existing);
                else
                        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
        } else {
                WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                /*
                 * the action on the existing ref matches
                 * the action on the ref we're trying to add.
                 * Bump the ref_mod by one so the backref that
                 * is eventually added/removed has the correct
                 * reference count
                 */
                existing->ref_mod += update->ref_mod;
        }
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /*
                 * if the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag
                 * set.  Set it again here.
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation,
         * we only need the lock for this case because we could be processing
         * it currently, for refs we just added we know we're ok.
         */
        existing->ref_mod += update->ref_mod;
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, int action, int is_data)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->ref_root = RB_ROOT;
        head_ref->processing = 0;

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                update_existing_head_ref(&existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action, int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(ref, full_ref, action);

        spin_lock(&head_ref->lock);
        existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
        if (existing) {
                update_existing_ref(trans, delayed_refs, head_ref, existing,
                                    ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
        } else {
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        spin_unlock(&head_ref->lock);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action, int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(ref, full_ref, action);

        spin_lock(&head_ref->lock);
        existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
        if (existing) {
                update_existing_ref(trans, delayed_refs, head_ref, existing,
                                    ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
        } else {
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        spin_unlock(&head_ref->lock);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
                                        bytenr, num_bytes, action, 0);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action,
                             for_cow);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
                                        bytenr, num_bytes, action, 1);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action, for_cow);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}

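/*
 * Record an extent op (key and/or flags update) for this extent by queueing
 * a head ref update that neither adds nor drops references.
 */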
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

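/* destroy the slab caches used by delayed ref processing */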
void btrfs_delayed_ref_exit(void)
{
        if (btrfs_delayed_ref_head_cachep)
                kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        if (btrfs_delayed_tree_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        if (btrfs_delayed_data_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        if (btrfs_delayed_extent_op_cachep)
                kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

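/*
 * create the slab caches used by delayed ref processing; anything that was
 * created before a failure is torn down again
 */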
int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                        "btrfs_delayed_ref_head",
                        sizeof(struct btrfs_delayed_ref_head), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                        "btrfs_delayed_tree_ref",
                        sizeof(struct btrfs_delayed_tree_ref), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                        "btrfs_delayed_data_ref",
                        sizeof(struct btrfs_delayed_data_ref), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                        "btrfs_delayed_extent_op",
                        sizeof(struct btrfs_delayed_extent_op), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}