1 /*
2 * Copyright (C) 2011 Fujitsu. All rights reserved.
3 * Written by Miao Xie <miaox@cn.fujitsu.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
18 */
19
20 #include <linux/slab.h>
21 #include "delayed-inode.h"
22 #include "disk-io.h"
23 #include "transaction.h"
24 #include "ctree.h"
25
26 #define BTRFS_DELAYED_WRITEBACK 512
27 #define BTRFS_DELAYED_BACKGROUND 128
28 #define BTRFS_DELAYED_BATCH 16
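
/*
 * How these thresholds appear to interact below: the async worker is
 * kicked off once the number of pending delayed items reaches
 * BTRFS_DELAYED_BACKGROUND, callers start throttling and waiting at
 * BTRFS_DELAYED_WRITEBACK, and waiters are woken after every
 * BTRFS_DELAYED_BATCH completed items (see finish_one_item() and
 * btrfs_balance_delayed_items()).
 */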
29
30 static struct kmem_cache *delayed_node_cache;
31
32 int __init btrfs_delayed_inode_init(void)
33 {
34 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
35 sizeof(struct btrfs_delayed_node),
36 0,
37 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
38 NULL);
39 if (!delayed_node_cache)
40 return -ENOMEM;
41 return 0;
42 }
43
44 void btrfs_delayed_inode_exit(void)
45 {
46 if (delayed_node_cache)
47 kmem_cache_destroy(delayed_node_cache);
48 }
49
50 static inline void btrfs_init_delayed_node(
51 struct btrfs_delayed_node *delayed_node,
52 struct btrfs_root *root, u64 inode_id)
53 {
54 delayed_node->root = root;
55 delayed_node->inode_id = inode_id;
56 atomic_set(&delayed_node->refs, 0);
57 delayed_node->count = 0;
58 delayed_node->in_list = 0;
59 delayed_node->inode_dirty = 0;
60 delayed_node->ins_root = RB_ROOT;
61 delayed_node->del_root = RB_ROOT;
62 mutex_init(&delayed_node->mutex);
63 delayed_node->index_cnt = 0;
64 INIT_LIST_HEAD(&delayed_node->n_list);
65 INIT_LIST_HEAD(&delayed_node->p_list);
66 delayed_node->bytes_reserved = 0;
67 memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
68 }
69
70 static inline int btrfs_is_continuous_delayed_item(
71 struct btrfs_delayed_item *item1,
72 struct btrfs_delayed_item *item2)
73 {
74 if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
75 item1->key.objectid == item2->key.objectid &&
76 item1->key.type == item2->key.type &&
77 item1->key.offset + 1 == item2->key.offset)
78 return 1;
79 return 0;
80 }
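
/*
 * For illustration (hypothetical keys): two delayed items with keys
 * (objectid 257, BTRFS_DIR_INDEX_KEY, offset 100) and
 * (objectid 257, BTRFS_DIR_INDEX_KEY, offset 101) are continuous and
 * can be batched into a single leaf operation; offsets 100 and 102
 * would not be.
 */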
81
82 static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
83 struct btrfs_root *root)
84 {
85 return root->fs_info->delayed_root;
86 }
87
88 static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
89 {
90 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
91 struct btrfs_root *root = btrfs_inode->root;
92 u64 ino = btrfs_ino(inode);
93 struct btrfs_delayed_node *node;
94
95 node = ACCESS_ONCE(btrfs_inode->delayed_node);
96 if (node) {
97 atomic_inc(&node->refs);
98 return node;
99 }
100
101 spin_lock(&root->inode_lock);
102 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
103 if (node) {
104 if (btrfs_inode->delayed_node) {
105 atomic_inc(&node->refs); /* can be accessed */
106 BUG_ON(btrfs_inode->delayed_node != node);
107 spin_unlock(&root->inode_lock);
108 return node;
109 }
110 btrfs_inode->delayed_node = node;
111 /* can be accessed and cached in the inode */
112 atomic_add(2, &node->refs);
113 spin_unlock(&root->inode_lock);
114 return node;
115 }
116 spin_unlock(&root->inode_lock);
117
118 return NULL;
119 }
120
121 /* Will return either the node or ERR_PTR(-ENOMEM) */
122 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
123 struct inode *inode)
124 {
125 struct btrfs_delayed_node *node;
126 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
127 struct btrfs_root *root = btrfs_inode->root;
128 u64 ino = btrfs_ino(inode);
129 int ret;
130
131 again:
132 node = btrfs_get_delayed_node(inode);
133 if (node)
134 return node;
135
136 node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
137 if (!node)
138 return ERR_PTR(-ENOMEM);
139 btrfs_init_delayed_node(node, root, ino);
140
141 /* cached in the btrfs inode and can be accessed */
142 atomic_add(2, &node->refs);
143
144 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
145 if (ret) {
146 kmem_cache_free(delayed_node_cache, node);
147 return ERR_PTR(ret);
148 }
149
150 spin_lock(&root->inode_lock);
151 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
152 if (ret == -EEXIST) {
153 kmem_cache_free(delayed_node_cache, node);
154 spin_unlock(&root->inode_lock);
155 radix_tree_preload_end();
156 goto again;
157 }
158 btrfs_inode->delayed_node = node;
159 spin_unlock(&root->inode_lock);
160 radix_tree_preload_end();
161
162 return node;
163 }
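
/*
 * A minimal caller sketch (illustrative only; everything except the
 * functions defined in this file is hypothetical):
 *
 *	struct btrfs_delayed_node *dn;
 *
 *	dn = btrfs_get_or_create_delayed_node(dir);
 *	if (IS_ERR(dn))
 *		return PTR_ERR(dn);
 *	mutex_lock(&dn->mutex);
 *	... queue delayed items on dn ...
 *	mutex_unlock(&dn->mutex);
 *	btrfs_release_delayed_node(dn);
 *
 * A new node starts with two references: one for the pointer cached in
 * the btrfs inode and one for the caller, which must be dropped with
 * btrfs_release_delayed_node().
 */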
164
165 /*
166 * Call this with delayed_node->mutex held.
167 *
168 * If mod is 1, also add this node to the prepare list.
169 */
170 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
171 struct btrfs_delayed_node *node,
172 int mod)
173 {
174 spin_lock(&root->lock);
175 if (node->in_list) {
176 if (!list_empty(&node->p_list))
177 list_move_tail(&node->p_list, &root->prepare_list);
178 else if (mod)
179 list_add_tail(&node->p_list, &root->prepare_list);
180 } else {
181 list_add_tail(&node->n_list, &root->node_list);
182 list_add_tail(&node->p_list, &root->prepare_list);
183 atomic_inc(&node->refs); /* inserted into list */
184 root->nodes++;
185 node->in_list = 1;
186 }
187 spin_unlock(&root->lock);
188 }
189
190 /* Call this with delayed_node->mutex held. */
191 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
192 struct btrfs_delayed_node *node)
193 {
194 spin_lock(&root->lock);
195 if (node->in_list) {
196 root->nodes--;
197 atomic_dec(&node->refs); /* not in the list */
198 list_del_init(&node->n_list);
199 if (!list_empty(&node->p_list))
200 list_del_init(&node->p_list);
201 node->in_list = 0;
202 }
203 spin_unlock(&root->lock);
204 }
205
206 static struct btrfs_delayed_node *btrfs_first_delayed_node(
207 struct btrfs_delayed_root *delayed_root)
208 {
209 struct list_head *p;
210 struct btrfs_delayed_node *node = NULL;
211
212 spin_lock(&delayed_root->lock);
213 if (list_empty(&delayed_root->node_list))
214 goto out;
215
216 p = delayed_root->node_list.next;
217 node = list_entry(p, struct btrfs_delayed_node, n_list);
218 atomic_inc(&node->refs);
219 out:
220 spin_unlock(&delayed_root->lock);
221
222 return node;
223 }
224
225 static struct btrfs_delayed_node *btrfs_next_delayed_node(
226 struct btrfs_delayed_node *node)
227 {
228 struct btrfs_delayed_root *delayed_root;
229 struct list_head *p;
230 struct btrfs_delayed_node *next = NULL;
231
232 delayed_root = node->root->fs_info->delayed_root;
233 spin_lock(&delayed_root->lock);
234 if (!node->in_list) { /* not in the list */
235 if (list_empty(&delayed_root->node_list))
236 goto out;
237 p = delayed_root->node_list.next;
238 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
239 goto out;
240 else
241 p = node->n_list.next;
242
243 next = list_entry(p, struct btrfs_delayed_node, n_list);
244 atomic_inc(&next->refs);
245 out:
246 spin_unlock(&delayed_root->lock);
247
248 return next;
249 }
250
251 static void __btrfs_release_delayed_node(
252 struct btrfs_delayed_node *delayed_node,
253 int mod)
254 {
255 struct btrfs_delayed_root *delayed_root;
256
257 if (!delayed_node)
258 return;
259
260 delayed_root = delayed_node->root->fs_info->delayed_root;
261
262 mutex_lock(&delayed_node->mutex);
263 if (delayed_node->count)
264 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
265 else
266 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
267 mutex_unlock(&delayed_node->mutex);
268
269 if (atomic_dec_and_test(&delayed_node->refs)) {
270 struct btrfs_root *root = delayed_node->root;
271 spin_lock(&root->inode_lock);
272 if (atomic_read(&delayed_node->refs) == 0) {
273 radix_tree_delete(&root->delayed_nodes_tree,
274 delayed_node->inode_id);
275 kmem_cache_free(delayed_node_cache, delayed_node);
276 }
277 spin_unlock(&root->inode_lock);
278 }
279 }
280
281 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
282 {
283 __btrfs_release_delayed_node(node, 0);
284 }
285
286 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
287 struct btrfs_delayed_root *delayed_root)
288 {
289 struct list_head *p;
290 struct btrfs_delayed_node *node = NULL;
291
292 spin_lock(&delayed_root->lock);
293 if (list_empty(&delayed_root->prepare_list))
294 goto out;
295
296 p = delayed_root->prepare_list.next;
297 list_del_init(p);
298 node = list_entry(p, struct btrfs_delayed_node, p_list);
299 atomic_inc(&node->refs);
300 out:
301 spin_unlock(&delayed_root->lock);
302
303 return node;
304 }
305
306 static inline void btrfs_release_prepared_delayed_node(
307 struct btrfs_delayed_node *node)
308 {
309 __btrfs_release_delayed_node(node, 1);
310 }
311
312 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
313 {
314 struct btrfs_delayed_item *item;
315 item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
316 if (item) {
317 item->data_len = data_len;
318 item->ins_or_del = 0;
319 item->bytes_reserved = 0;
320 item->delayed_node = NULL;
321 atomic_set(&item->refs, 1);
322 }
323 return item;
324 }
325
326 /*
327 * __btrfs_lookup_delayed_item - look up a delayed item by key
328 * @root: the rb-root of the delayed node to search
329 * @key: the key to look up
330 * @prev: used to store the previous item if the right item isn't found
331 * @next: used to store the next item if the right item isn't found
332 *
333 * Note: if the right item isn't found, we return NULL and store its
334 * neighbours in @prev and @next.
335 */
336 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
337 struct rb_root *root,
338 struct btrfs_key *key,
339 struct btrfs_delayed_item **prev,
340 struct btrfs_delayed_item **next)
341 {
342 struct rb_node *node, *prev_node = NULL;
343 struct btrfs_delayed_item *delayed_item = NULL;
344 int ret = 0;
345
346 node = root->rb_node;
347
348 while (node) {
349 delayed_item = rb_entry(node, struct btrfs_delayed_item,
350 rb_node);
351 prev_node = node;
352 ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
353 if (ret < 0)
354 node = node->rb_right;
355 else if (ret > 0)
356 node = node->rb_left;
357 else
358 return delayed_item;
359 }
360
361 if (prev) {
362 if (!prev_node)
363 *prev = NULL;
364 else if (ret < 0)
365 *prev = delayed_item;
366 else if ((node = rb_prev(prev_node)) != NULL) {
367 *prev = rb_entry(node, struct btrfs_delayed_item,
368 rb_node);
369 } else
370 *prev = NULL;
371 }
372
373 if (next) {
374 if (!prev_node)
375 *next = NULL;
376 else if (ret > 0)
377 *next = delayed_item;
378 else if ((node = rb_next(prev_node)) != NULL) {
379 *next = rb_entry(node, struct btrfs_delayed_item,
380 rb_node);
381 } else
382 *next = NULL;
383 }
384 return NULL;
385 }
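
/*
 * For illustration (hypothetical tree): if the rb-tree holds items at
 * offsets 10 and 20 and we look up offset 15, the function returns
 * NULL, sets *prev to the item at 10 and *next to the item at 20. On
 * an exact match the item itself is returned and prev and next are
 * left untouched.
 */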
386
387 static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
388 struct btrfs_delayed_node *delayed_node,
389 struct btrfs_key *key)
390 {
391 struct btrfs_delayed_item *item;
392
393 item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
394 NULL, NULL);
395 return item;
396 }
397
398 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
399 struct btrfs_delayed_item *ins,
400 int action)
401 {
402 struct rb_node **p, *node;
403 struct rb_node *parent_node = NULL;
404 struct rb_root *root;
405 struct btrfs_delayed_item *item;
406 int cmp;
407
408 if (action == BTRFS_DELAYED_INSERTION_ITEM)
409 root = &delayed_node->ins_root;
410 else if (action == BTRFS_DELAYED_DELETION_ITEM)
411 root = &delayed_node->del_root;
412 else
413 BUG();
414 p = &root->rb_node;
415 node = &ins->rb_node;
416
417 while (*p) {
418 parent_node = *p;
419 item = rb_entry(parent_node, struct btrfs_delayed_item,
420 rb_node);
421
422 cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
423 if (cmp < 0)
424 p = &(*p)->rb_right;
425 else if (cmp > 0)
426 p = &(*p)->rb_left;
427 else
428 return -EEXIST;
429 }
430
431 rb_link_node(node, parent_node, p);
432 rb_insert_color(node, root);
433 ins->delayed_node = delayed_node;
434 ins->ins_or_del = action;
435
436 if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
437 action == BTRFS_DELAYED_INSERTION_ITEM &&
438 ins->key.offset >= delayed_node->index_cnt)
439 delayed_node->index_cnt = ins->key.offset + 1;
440
441 delayed_node->count++;
442 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
443 return 0;
444 }
445
446 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
447 struct btrfs_delayed_item *item)
448 {
449 return __btrfs_add_delayed_item(node, item,
450 BTRFS_DELAYED_INSERTION_ITEM);
451 }
452
453 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
454 struct btrfs_delayed_item *item)
455 {
456 return __btrfs_add_delayed_item(node, item,
457 BTRFS_DELAYED_DELETION_ITEM);
458 }
459
460 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
461 {
462 int seq = atomic_inc_return(&delayed_root->items_seq);
463 if ((atomic_dec_return(&delayed_root->items) <
464 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
465 waitqueue_active(&delayed_root->wait))
466 wake_up(&delayed_root->wait);
467 }
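
/*
 * A reading of the wake-up condition above: waiters are woken either
 * when the backlog drops below BTRFS_DELAYED_BACKGROUND (128) items,
 * or on every BTRFS_DELAYED_BATCH-th (16th) completion, so a task
 * throttled in btrfs_balance_delayed_items() sees steady progress
 * instead of waiting for the whole backlog to drain.
 */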
468
469 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
470 {
471 struct rb_root *root;
472 struct btrfs_delayed_root *delayed_root;
473
474 delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
475
476 BUG_ON(!delayed_root);
477 BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
478 delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
479
480 if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
481 root = &delayed_item->delayed_node->ins_root;
482 else
483 root = &delayed_item->delayed_node->del_root;
484
485 rb_erase(&delayed_item->rb_node, root);
486 delayed_item->delayed_node->count--;
487
488 finish_one_item(delayed_root);
489 }
490
491 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
492 {
493 if (item) {
494 __btrfs_remove_delayed_item(item);
495 if (atomic_dec_and_test(&item->refs))
496 kfree(item);
497 }
498 }
499
500 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
501 struct btrfs_delayed_node *delayed_node)
502 {
503 struct rb_node *p;
504 struct btrfs_delayed_item *item = NULL;
505
506 p = rb_first(&delayed_node->ins_root);
507 if (p)
508 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
509
510 return item;
511 }
512
513 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
514 struct btrfs_delayed_node *delayed_node)
515 {
516 struct rb_node *p;
517 struct btrfs_delayed_item *item = NULL;
518
519 p = rb_first(&delayed_node->del_root);
520 if (p)
521 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
522
523 return item;
524 }
525
526 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
527 struct btrfs_delayed_item *item)
528 {
529 struct rb_node *p;
530 struct btrfs_delayed_item *next = NULL;
531
532 p = rb_next(&item->rb_node);
533 if (p)
534 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
535
536 return next;
537 }
538
539 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
540 struct btrfs_root *root,
541 struct btrfs_delayed_item *item)
542 {
543 struct btrfs_block_rsv *src_rsv;
544 struct btrfs_block_rsv *dst_rsv;
545 u64 num_bytes;
546 int ret;
547
548 if (!trans->bytes_reserved)
549 return 0;
550
551 src_rsv = trans->block_rsv;
552 dst_rsv = &root->fs_info->delayed_block_rsv;
553
554 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
555 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
556 if (!ret) {
557 trace_btrfs_space_reservation(root->fs_info, "delayed_item",
558 item->key.objectid,
559 num_bytes, 1);
560 item->bytes_reserved = num_bytes;
561 }
562
563 return ret;
564 }
565
566 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
567 struct btrfs_delayed_item *item)
568 {
569 struct btrfs_block_rsv *rsv;
570
571 if (!item->bytes_reserved)
572 return;
573
574 rsv = &root->fs_info->delayed_block_rsv;
575 trace_btrfs_space_reservation(root->fs_info, "delayed_item",
576 item->key.objectid, item->bytes_reserved,
577 0);
578 btrfs_block_rsv_release(root, rsv,
579 item->bytes_reserved);
580 }
581
582 static int btrfs_delayed_inode_reserve_metadata(
583 struct btrfs_trans_handle *trans,
584 struct btrfs_root *root,
585 struct inode *inode,
586 struct btrfs_delayed_node *node)
587 {
588 struct btrfs_block_rsv *src_rsv;
589 struct btrfs_block_rsv *dst_rsv;
590 u64 num_bytes;
591 int ret;
592 bool release = false;
593
594 src_rsv = trans->block_rsv;
595 dst_rsv = &root->fs_info->delayed_block_rsv;
596
597 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
598
599 /*
600 * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
601 * which doesn't reserve space, for speed. This is a problem since we
602 * still need to reserve space for this update, so try to reserve the
603 * space here.
604 *
605 * If src_rsv == delalloc_block_rsv we'll let the update steal from it,
606 * since that space has already been accounted for there.
607 */
608 if (!src_rsv || (!trans->bytes_reserved &&
609 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
610 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
611 BTRFS_RESERVE_NO_FLUSH);
612 /*
613 * Since we're under a transaction, reserve_metadata_bytes could
614 * try to commit that transaction, and would return EAGAIN to
615 * tell us to stop the transaction we hold. Return ENOSPC
616 * instead so that btrfs_dirty_inode knows what to do.
617 */
618 if (ret == -EAGAIN)
619 ret = -ENOSPC;
620 if (!ret) {
621 node->bytes_reserved = num_bytes;
622 trace_btrfs_space_reservation(root->fs_info,
623 "delayed_inode",
624 btrfs_ino(inode),
625 num_bytes, 1);
626 }
627 return ret;
628 } else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
629 spin_lock(&BTRFS_I(inode)->lock);
630 if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
631 &BTRFS_I(inode)->runtime_flags)) {
632 spin_unlock(&BTRFS_I(inode)->lock);
633 release = true;
634 goto migrate;
635 }
636 spin_unlock(&BTRFS_I(inode)->lock);
637
638 /* Ok, we didn't have space pre-reserved. This shouldn't happen
639 * too often, but it can if we do delalloc to an existing inode
640 * that gets dirtied because of the time update, then isn't
641 * touched again until after the transaction commits, and then
642 * we try to write out the data. First try to be nice and
643 * reserve something strictly for us; failing that, be a pain
644 * and try to steal from the delalloc block rsv.
645 */
646 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
647 BTRFS_RESERVE_NO_FLUSH);
648 if (!ret)
649 goto out;
650
651 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
652 if (!WARN_ON(ret))
653 goto out;
654
655 /*
656 * Ok, this is a problem; just steal from the global rsv,
657 * since this really shouldn't happen that often.
658 */
659 ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
660 dst_rsv, num_bytes);
661 goto out;
662 }
663
664 migrate:
665 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
666
667 out:
668 /*
669 * Migrate only takes a reservation; it doesn't touch the size of the
670 * block_rsv. This is to simplify things for callers who don't normally
671 * have things migrated from their block rsv. When they release their
672 * reservation, that will decrease the size as well, so if migrate
673 * reduced the size we'd end up with a negative size. But for the
674 * delalloc_meta_reserved stuff we would only know to drop 1 reservation,
675 * while we could in fact do this reserve/migrate dance several times
676 * between the time of the original reservation and the time we'd clean
677 * it up. So to take care of this, release the space for the meta
678 * reservation here. It may be time for a documentation page on how
679 * block rsvs work.
680 */
681 if (!ret) {
682 trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
683 btrfs_ino(inode), num_bytes, 1);
684 node->bytes_reserved = num_bytes;
685 }
686
687 if (release) {
688 trace_btrfs_space_reservation(root->fs_info, "delalloc",
689 btrfs_ino(inode), num_bytes, 0);
690 btrfs_block_rsv_release(root, src_rsv, num_bytes);
691 }
692
693 return ret;
694 }
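
/*
 * Summary of the fallback order above, as read from the code: 1) with
 * no usable source rsv, take a fresh BTRFS_RESERVE_NO_FLUSH
 * reservation, turning EAGAIN into ENOSPC for btrfs_dirty_inode;
 * 2) if the source is the delalloc rsv and a meta unit was
 * pre-reserved, migrate that unit and release it from the source;
 * 3) otherwise try a fresh reservation, then migrating from the
 * delalloc rsv, and as a last resort steal from the global rsv.
 */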
695
696 static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
697 struct btrfs_delayed_node *node)
698 {
699 struct btrfs_block_rsv *rsv;
700
701 if (!node->bytes_reserved)
702 return;
703
704 rsv = &root->fs_info->delayed_block_rsv;
705 trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
706 node->inode_id, node->bytes_reserved, 0);
707 btrfs_block_rsv_release(root, rsv,
708 node->bytes_reserved);
709 node->bytes_reserved = 0;
710 }
711
712 /*
713 * This helper inserts a batch of continuous items into the same leaf,
714 * limited by the free space of the leaf.
715 */
716 static int btrfs_batch_insert_items(struct btrfs_root *root,
717 struct btrfs_path *path,
718 struct btrfs_delayed_item *item)
719 {
720 struct btrfs_delayed_item *curr, *next;
721 int free_space;
722 int total_data_size = 0, total_size = 0;
723 struct extent_buffer *leaf;
724 char *data_ptr;
725 struct btrfs_key *keys;
726 u32 *data_size;
727 struct list_head head;
728 int slot;
729 int nitems;
730 int i;
731 int ret = 0;
732
733 BUG_ON(!path->nodes[0]);
734
735 leaf = path->nodes[0];
736 free_space = btrfs_leaf_free_space(root, leaf);
737 INIT_LIST_HEAD(&head);
738
739 next = item;
740 nitems = 0;
741
742 /*
743 * count the number of continuous items that we can insert in a batch
744 */
745 while (total_size + next->data_len + sizeof(struct btrfs_item) <=
746 free_space) {
747 total_data_size += next->data_len;
748 total_size += next->data_len + sizeof(struct btrfs_item);
749 list_add_tail(&next->tree_list, &head);
750 nitems++;
751
752 curr = next;
753 next = __btrfs_next_delayed_item(curr);
754 if (!next)
755 break;
756
757 if (!btrfs_is_continuous_delayed_item(curr, next))
758 break;
759 }
760
761 if (!nitems) {
762 ret = 0;
763 goto out;
764 }
765
766 /*
767 * we need to allocate some memory, and the allocation might cause the
768 * task to sleep, so set all locked nodes in the path to blocking locks
769 * first.
770 */
771 btrfs_set_path_blocking(path);
772
773 keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
774 if (!keys) {
775 ret = -ENOMEM;
776 goto out;
777 }
778
779 data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
780 if (!data_size) {
781 ret = -ENOMEM;
782 goto error;
783 }
784
785 /* get keys of all the delayed items */
786 i = 0;
787 list_for_each_entry(next, &head, tree_list) {
788 keys[i] = next->key;
789 data_size[i] = next->data_len;
790 i++;
791 }
792
793 /* reset all the locked nodes in the path to spinning locks. */
794 btrfs_clear_path_blocking(path, NULL, 0);
795
796 /* insert the keys of the items */
797 setup_items_for_insert(root, path, keys, data_size,
798 total_data_size, total_size, nitems);
799
800 /* insert the dir index items */
801 slot = path->slots[0];
802 list_for_each_entry_safe(curr, next, &head, tree_list) {
803 data_ptr = btrfs_item_ptr(leaf, slot, char);
804 write_extent_buffer(leaf, &curr->data,
805 (unsigned long)data_ptr,
806 curr->data_len);
807 slot++;
808
809 btrfs_delayed_item_release_metadata(root, curr);
810
811 list_del(&curr->tree_list);
812 btrfs_release_delayed_item(curr);
813 }
814
815 error:
816 kfree(data_size);
817 kfree(keys);
818 out:
819 return ret;
820 }
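
/*
 * A worked example of the free-space check above (hypothetical sizes):
 * with 1000 bytes free in the leaf and dir index items whose data_len
 * is 40, each insertion consumes data_len plus sizeof(struct
 * btrfs_item) bytes of leaf space (item data plus item header), so
 * roughly 1000 / (40 + sizeof(struct btrfs_item)) continuous items fit
 * into a single setup_items_for_insert() call.
 */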
821
822 /*
823 * This helper handles simple insertions that needn't extend an existing
824 * item for new data, such as directory index item or inode item insertion.
825 */
826 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
827 struct btrfs_root *root,
828 struct btrfs_path *path,
829 struct btrfs_delayed_item *delayed_item)
830 {
831 struct extent_buffer *leaf;
832 char *ptr;
833 int ret;
834
835 ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
836 delayed_item->data_len);
837 if (ret < 0 && ret != -EEXIST)
838 return ret;
839
840 leaf = path->nodes[0];
841
842 ptr = btrfs_item_ptr(leaf, path->slots[0], char);
843
844 write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
845 delayed_item->data_len);
846 btrfs_mark_buffer_dirty(leaf);
847
848 btrfs_delayed_item_release_metadata(root, delayed_item);
849 return 0;
850 }
851
852 /*
853 * we insert an item first; if continuous items follow it, we try to
854 * insert them into the same leaf.
855 */
856 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
857 struct btrfs_path *path,
858 struct btrfs_root *root,
859 struct btrfs_delayed_node *node)
860 {
861 struct btrfs_delayed_item *curr, *prev;
862 int ret = 0;
863
864 do_again:
865 mutex_lock(&node->mutex);
866 curr = __btrfs_first_delayed_insertion_item(node);
867 if (!curr)
868 goto insert_end;
869
870 ret = btrfs_insert_delayed_item(trans, root, path, curr);
871 if (ret < 0) {
872 btrfs_release_path(path);
873 goto insert_end;
874 }
875
876 prev = curr;
877 curr = __btrfs_next_delayed_item(prev);
878 if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
879 /* insert the continuous items into the same leaf */
880 path->slots[0]++;
881 btrfs_batch_insert_items(root, path, curr);
882 }
883 btrfs_release_delayed_item(prev);
884 btrfs_mark_buffer_dirty(path->nodes[0]);
885
886 btrfs_release_path(path);
887 mutex_unlock(&node->mutex);
888 goto do_again;
889
890 insert_end:
891 mutex_unlock(&node->mutex);
892 return ret;
893 }
894
895 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
896 struct btrfs_root *root,
897 struct btrfs_path *path,
898 struct btrfs_delayed_item *item)
899 {
900 struct btrfs_delayed_item *curr, *next;
901 struct extent_buffer *leaf;
902 struct btrfs_key key;
903 struct list_head head;
904 int nitems, i, last_item;
905 int ret = 0;
906
907 BUG_ON(!path->nodes[0]);
908
909 leaf = path->nodes[0];
910
911 i = path->slots[0];
912 last_item = btrfs_header_nritems(leaf) - 1;
913 if (i > last_item)
914 return -ENOENT; /* FIXME: Is errno suitable? */
915
916 next = item;
917 INIT_LIST_HEAD(&head);
918 btrfs_item_key_to_cpu(leaf, &key, i);
919 nitems = 0;
920 /*
921 * count the number of dir index items that we can delete in a batch
922 */
923 while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
924 list_add_tail(&next->tree_list, &head);
925 nitems++;
926
927 curr = next;
928 next = __btrfs_next_delayed_item(curr);
929 if (!next)
930 break;
931
932 if (!btrfs_is_continuous_delayed_item(curr, next))
933 break;
934
935 i++;
936 if (i > last_item)
937 break;
938 btrfs_item_key_to_cpu(leaf, &key, i);
939 }
940
941 if (!nitems)
942 return 0;
943
944 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
945 if (ret)
946 goto out;
947
948 list_for_each_entry_safe(curr, next, &head, tree_list) {
949 btrfs_delayed_item_release_metadata(root, curr);
950 list_del(&curr->tree_list);
951 btrfs_release_delayed_item(curr);
952 }
953
954 out:
955 return ret;
956 }
957
958 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
959 struct btrfs_path *path,
960 struct btrfs_root *root,
961 struct btrfs_delayed_node *node)
962 {
963 struct btrfs_delayed_item *curr, *prev;
964 int ret = 0;
965
966 do_again:
967 mutex_lock(&node->mutex);
968 curr = __btrfs_first_delayed_deletion_item(node);
969 if (!curr)
970 goto delete_fail;
971
972 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
973 if (ret < 0)
974 goto delete_fail;
975 else if (ret > 0) {
976 /*
977 * can't find the item this node points to, so the node is
978 * invalid; just drop it.
979 */
980 prev = curr;
981 curr = __btrfs_next_delayed_item(prev);
982 btrfs_release_delayed_item(prev);
983 ret = 0;
984 btrfs_release_path(path);
985 if (curr) {
986 mutex_unlock(&node->mutex);
987 goto do_again;
988 } else
989 goto delete_fail;
990 }
991
992 btrfs_batch_delete_items(trans, root, path, curr);
993 btrfs_release_path(path);
994 mutex_unlock(&node->mutex);
995 goto do_again;
996
997 delete_fail:
998 btrfs_release_path(path);
999 mutex_unlock(&node->mutex);
1000 return ret;
1001 }
1002
1003 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
1004 {
1005 struct btrfs_delayed_root *delayed_root;
1006
1007 if (delayed_node && delayed_node->inode_dirty) {
1008 BUG_ON(!delayed_node->root);
1009 delayed_node->inode_dirty = 0;
1010 delayed_node->count--;
1011
1012 delayed_root = delayed_node->root->fs_info->delayed_root;
1013 finish_one_item(delayed_root);
1014 }
1015 }
1016
1017 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1018 struct btrfs_root *root,
1019 struct btrfs_path *path,
1020 struct btrfs_delayed_node *node)
1021 {
1022 struct btrfs_key key;
1023 struct btrfs_inode_item *inode_item;
1024 struct extent_buffer *leaf;
1025 int ret;
1026
1027 key.objectid = node->inode_id;
1028 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1029 key.offset = 0;
1030
1031 ret = btrfs_lookup_inode(trans, root, path, &key, 1);
1032 if (ret > 0) {
1033 btrfs_release_path(path);
1034 return -ENOENT;
1035 } else if (ret < 0) {
1036 return ret;
1037 }
1038
1039 btrfs_unlock_up_safe(path, 1);
1040 leaf = path->nodes[0];
1041 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1042 struct btrfs_inode_item);
1043 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1044 sizeof(struct btrfs_inode_item));
1045 btrfs_mark_buffer_dirty(leaf);
1046 btrfs_release_path(path);
1047
1048 btrfs_delayed_inode_release_metadata(root, node);
1049 btrfs_release_delayed_inode(node);
1050
1051 return 0;
1052 }
1053
1054 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1055 struct btrfs_root *root,
1056 struct btrfs_path *path,
1057 struct btrfs_delayed_node *node)
1058 {
1059 int ret;
1060
1061 mutex_lock(&node->mutex);
1062 if (!node->inode_dirty) {
1063 mutex_unlock(&node->mutex);
1064 return 0;
1065 }
1066
1067 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1068 mutex_unlock(&node->mutex);
1069 return ret;
1070 }
1071
1072 static inline int
1073 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1074 struct btrfs_path *path,
1075 struct btrfs_delayed_node *node)
1076 {
1077 int ret;
1078
1079 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1080 if (ret)
1081 return ret;
1082
1083 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1084 if (ret)
1085 return ret;
1086
1087 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1088 return ret;
1089 }
1090
1091 /*
1092 * Called when committing the transaction.
1093 * Returns 0 on success.
1094 * Returns < 0 on error; in that case the transaction is aborted and any
1095 * outstanding delayed items are cleaned up.
1096 */
1097 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1098 struct btrfs_root *root, int nr)
1099 {
1100 struct btrfs_delayed_root *delayed_root;
1101 struct btrfs_delayed_node *curr_node, *prev_node;
1102 struct btrfs_path *path;
1103 struct btrfs_block_rsv *block_rsv;
1104 int ret = 0;
1105 bool count = (nr > 0);
1106
1107 if (trans->aborted)
1108 return -EIO;
1109
1110 path = btrfs_alloc_path();
1111 if (!path)
1112 return -ENOMEM;
1113 path->leave_spinning = 1;
1114
1115 block_rsv = trans->block_rsv;
1116 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1117
1118 delayed_root = btrfs_get_delayed_root(root);
1119
1120 curr_node = btrfs_first_delayed_node(delayed_root);
1121 while (curr_node && (!count || nr--)) {
1122 ret = __btrfs_commit_inode_delayed_items(trans, path,
1123 curr_node);
1124 if (ret) {
1125 btrfs_release_delayed_node(curr_node);
1126 curr_node = NULL;
1127 btrfs_abort_transaction(trans, root, ret);
1128 break;
1129 }
1130
1131 prev_node = curr_node;
1132 curr_node = btrfs_next_delayed_node(curr_node);
1133 btrfs_release_delayed_node(prev_node);
1134 }
1135
1136 if (curr_node)
1137 btrfs_release_delayed_node(curr_node);
1138 btrfs_free_path(path);
1139 trans->block_rsv = block_rsv;
1140
1141 return ret;
1142 }
1143
1144 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1145 struct btrfs_root *root)
1146 {
1147 return __btrfs_run_delayed_items(trans, root, -1);
1148 }
1149
1150 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
1151 struct btrfs_root *root, int nr)
1152 {
1153 return __btrfs_run_delayed_items(trans, root, nr);
1154 }
1155
1156 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1157 struct inode *inode)
1158 {
1159 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1160 struct btrfs_path *path;
1161 struct btrfs_block_rsv *block_rsv;
1162 int ret;
1163
1164 if (!delayed_node)
1165 return 0;
1166
1167 mutex_lock(&delayed_node->mutex);
1168 if (!delayed_node->count) {
1169 mutex_unlock(&delayed_node->mutex);
1170 btrfs_release_delayed_node(delayed_node);
1171 return 0;
1172 }
1173 mutex_unlock(&delayed_node->mutex);
1174
1175 path = btrfs_alloc_path();
1176 if (!path) {
1177 btrfs_release_delayed_node(delayed_node);
1178 return -ENOMEM;
1179 }
1180 path->leave_spinning = 1;
1181
1182 block_rsv = trans->block_rsv;
1183 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1184
1185 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1186
1187 btrfs_release_delayed_node(delayed_node);
1188 btrfs_free_path(path);
1189 trans->block_rsv = block_rsv;
1190
1191 return ret;
1192 }
1193
1194 int btrfs_commit_inode_delayed_inode(struct inode *inode)
1195 {
1196 struct btrfs_trans_handle *trans;
1197 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1198 struct btrfs_path *path;
1199 struct btrfs_block_rsv *block_rsv;
1200 int ret;
1201
1202 if (!delayed_node)
1203 return 0;
1204
1205 mutex_lock(&delayed_node->mutex);
1206 if (!delayed_node->inode_dirty) {
1207 mutex_unlock(&delayed_node->mutex);
1208 btrfs_release_delayed_node(delayed_node);
1209 return 0;
1210 }
1211 mutex_unlock(&delayed_node->mutex);
1212
1213 trans = btrfs_join_transaction(delayed_node->root);
1214 if (IS_ERR(trans)) {
1215 ret = PTR_ERR(trans);
1216 goto out;
1217 }
1218
1219 path = btrfs_alloc_path();
1220 if (!path) {
1221 ret = -ENOMEM;
1222 goto trans_out;
1223 }
1224 path->leave_spinning = 1;
1225
1226 block_rsv = trans->block_rsv;
1227 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1228
1229 mutex_lock(&delayed_node->mutex);
1230 if (delayed_node->inode_dirty)
1231 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1232 path, delayed_node);
1233 else
1234 ret = 0;
1235 mutex_unlock(&delayed_node->mutex);
1236
1237 btrfs_free_path(path);
1238 trans->block_rsv = block_rsv;
1239 trans_out:
1240 btrfs_end_transaction(trans, delayed_node->root);
1241 btrfs_btree_balance_dirty(delayed_node->root);
1242 out:
1243 btrfs_release_delayed_node(delayed_node);
1244
1245 return ret;
1246 }
1247
1248 void btrfs_remove_delayed_node(struct inode *inode)
1249 {
1250 struct btrfs_delayed_node *delayed_node;
1251
1252 delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1253 if (!delayed_node)
1254 return;
1255
1256 BTRFS_I(inode)->delayed_node = NULL;
1257 btrfs_release_delayed_node(delayed_node);
1258 }
1259
1260 struct btrfs_async_delayed_work {
1261 struct btrfs_delayed_root *delayed_root;
1262 int nr;
1263 struct btrfs_work work;
1264 };
1265
1266 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1267 {
1268 struct btrfs_async_delayed_work *async_work;
1269 struct btrfs_delayed_root *delayed_root;
1270 struct btrfs_trans_handle *trans;
1271 struct btrfs_path *path;
1272 struct btrfs_delayed_node *delayed_node = NULL;
1273 struct btrfs_root *root;
1274 struct btrfs_block_rsv *block_rsv;
1275 int total_done = 0;
1276
1277 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1278 delayed_root = async_work->delayed_root;
1279
1280 path = btrfs_alloc_path();
1281 if (!path)
1282 goto out;
1283
1284 again:
1285 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
1286 goto free_path;
1287
1288 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1289 if (!delayed_node)
1290 goto free_path;
1291
1292 path->leave_spinning = 1;
1293 root = delayed_node->root;
1294
1295 trans = btrfs_join_transaction(root);
1296 if (IS_ERR(trans))
1297 goto release_path;
1298
1299 block_rsv = trans->block_rsv;
1300 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1301
1302 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1303 /*
1304 * Maybe new delayed items have been inserted, so we need to requeue
1305 * the work. Besides that, we must dequeue the empty delayed nodes
1306 * to avoid the race between the delayed-items balance and the worker.
1307 * The race looks like this:
1308 *	Task1				Worker thread
1309 *					count == 0, needn't requeue
1310 *					  also needn't insert the
1311 *					  delayed node into prepare
1312 *					  list again.
1313 *	add lots of delayed items
1314 *	queue the delayed node
1315 *	  already in the list,
1316 *	  and not in the prepare
1317 *	  list, it means the delayed
1318 *	  node is being dealt with
1319 *	  by the worker.
1320 *	do delayed items balance
1321 *	  the delayed node is being
1322 *	  dealt with by the worker
1323 *	  now, just wait.
1324 *					the worker goes idle.
1325 * Task1 will sleep until the transaction is committed.
1326 */
1327 mutex_lock(&delayed_node->mutex);
1328 btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
1329 mutex_unlock(&delayed_node->mutex);
1330
1331 trans->block_rsv = block_rsv;
1332 btrfs_end_transaction_dmeta(trans, root);
1333 btrfs_btree_balance_dirty_nodelay(root);
1334
1335 release_path:
1336 btrfs_release_path(path);
1337 total_done++;
1338
1339 btrfs_release_prepared_delayed_node(delayed_node);
1340 if (async_work->nr == 0 || total_done < async_work->nr)
1341 goto again;
1342
1343 free_path:
1344 btrfs_free_path(path);
1345 out:
1346 wake_up(&delayed_root->wait);
1347 kfree(async_work);
1348 }
1349
1350
1351 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1352 struct btrfs_root *root, int nr)
1353 {
1354 struct btrfs_async_delayed_work *async_work;
1355
1356 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1357 return 0;
1358
1359 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1360 if (!async_work)
1361 return -ENOMEM;
1362
1363 async_work->delayed_root = delayed_root;
1364 async_work->work.func = btrfs_async_run_delayed_root;
1365 async_work->work.flags = 0;
1366 async_work->nr = nr;
1367
1368 btrfs_queue_worker(&root->fs_info->delayed_workers, &async_work->work);
1369 return 0;
1370 }
1371
1372 void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1373 {
1374 struct btrfs_delayed_root *delayed_root;
1375 delayed_root = btrfs_get_delayed_root(root);
1376 WARN_ON(btrfs_first_delayed_node(delayed_root));
1377 }
1378
1379 static int refs_newer(struct btrfs_delayed_root *delayed_root,
1380 int seq, int count)
1381 {
1382 int val = atomic_read(&delayed_root->items_seq);
1383
1384 if (val < seq || val >= seq + count)
1385 return 1;
1386 return 0;
1387 }
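
/*
 * For illustration (hypothetical values): with seq = 100 and count =
 * BTRFS_DELAYED_BATCH (16), refs_newer() returns 1 once items_seq has
 * advanced to 116 or beyond, i.e. at least one whole batch completed
 * after the caller sampled seq; the val < seq leg appears to cover
 * counter wraparound.
 */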
1388
1389 void btrfs_balance_delayed_items(struct btrfs_root *root)
1390 {
1391 struct btrfs_delayed_root *delayed_root;
1392 int seq;
1393
1394 delayed_root = btrfs_get_delayed_root(root);
1395
1396 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1397 return;
1398
1399 seq = atomic_read(&delayed_root->items_seq);
1400
1401 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1402 int ret;
1403 DEFINE_WAIT(__wait);
1404
1405 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
1406 if (ret)
1407 return;
1408
1409 while (1) {
1410 prepare_to_wait(&delayed_root->wait, &__wait,
1411 TASK_INTERRUPTIBLE);
1412
1413 if (refs_newer(delayed_root, seq,
1414 BTRFS_DELAYED_BATCH) ||
1415 atomic_read(&delayed_root->items) <
1416 BTRFS_DELAYED_BACKGROUND) {
1417 break;
1418 }
1419 if (!signal_pending(current))
1420 schedule();
1421 else
1422 break;
1423 }
1424 finish_wait(&delayed_root->wait, &__wait);
1425 }
1426
1427 btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
1428 }
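
/*
 * Putting the thresholds together, as read from the code above: below
 * BTRFS_DELAYED_BACKGROUND items nothing is done; at or above
 * BTRFS_DELAYED_WRITEBACK the caller first kicks a full flush (nr == 0)
 * and blocks until a batch completes (refs_newer()) or the backlog
 * falls back under the background threshold; in either case an async
 * run of BTRFS_DELAYED_BATCH nodes is queued at the end.
 */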
1429
1430 /* Will return 0 or -ENOMEM */
1431 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1432 struct btrfs_root *root, const char *name,
1433 int name_len, struct inode *dir,
1434 struct btrfs_disk_key *disk_key, u8 type,
1435 u64 index)
1436 {
1437 struct btrfs_delayed_node *delayed_node;
1438 struct btrfs_delayed_item *delayed_item;
1439 struct btrfs_dir_item *dir_item;
1440 int ret;
1441
1442 delayed_node = btrfs_get_or_create_delayed_node(dir);
1443 if (IS_ERR(delayed_node))
1444 return PTR_ERR(delayed_node);
1445
1446 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1447 if (!delayed_item) {
1448 ret = -ENOMEM;
1449 goto release_node;
1450 }
1451
1452 delayed_item->key.objectid = btrfs_ino(dir);
1453 btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
1454 delayed_item->key.offset = index;
1455
1456 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1457 dir_item->location = *disk_key;
1458 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1459 btrfs_set_stack_dir_data_len(dir_item, 0);
1460 btrfs_set_stack_dir_name_len(dir_item, name_len);
1461 btrfs_set_stack_dir_type(dir_item, type);
1462 memcpy((char *)(dir_item + 1), name, name_len);
1463
1464 ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
1465 /*
1466 * we reserved enough space when we started a new transaction,
1467 * so a metadata reservation failure is impossible here
1468 */
1469 BUG_ON(ret);
1470
1471
1472 mutex_lock(&delayed_node->mutex);
1473 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1474 if (unlikely(ret)) {
1475 printk(KERN_ERR "err add delayed dir index item (name: %.*s) "
1476 "into the insertion tree of the delayed node "
1477 "(root id: %llu, inode id: %llu, errno: %d)\n",
1478 name_len, name, delayed_node->root->objectid,
1479 delayed_node->inode_id, ret);
1480 BUG();
1481 }
1482 mutex_unlock(&delayed_node->mutex);
1483
1484 release_node:
1485 btrfs_release_delayed_node(delayed_node);
1486 return ret;
1487 }
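
/*
 * A minimal caller sketch (illustrative only; the local variables are
 * hypothetical): when a new directory entry is created, the caller
 * queues the index item instead of touching the b-tree right away:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	btrfs_cpu_key_to_disk(&disk_key, &inode_location);
 *	ret = btrfs_insert_delayed_dir_index(trans, root, name, name_len,
 *					     dir, &disk_key, type, index);
 *
 * The dir index item is materialized in a leaf later, when the delayed
 * node is flushed by the async worker or at transaction commit.
 */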
1488
1489 static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
1490 struct btrfs_delayed_node *node,
1491 struct btrfs_key *key)
1492 {
1493 struct btrfs_delayed_item *item;
1494
1495 mutex_lock(&node->mutex);
1496 item = __btrfs_lookup_delayed_insertion_item(node, key);
1497 if (!item) {
1498 mutex_unlock(&node->mutex);
1499 return 1;
1500 }
1501
1502 btrfs_delayed_item_release_metadata(root, item);
1503 btrfs_release_delayed_item(item);
1504 mutex_unlock(&node->mutex);
1505 return 0;
1506 }
1507
1508 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1509 struct btrfs_root *root, struct inode *dir,
1510 u64 index)
1511 {
1512 struct btrfs_delayed_node *node;
1513 struct btrfs_delayed_item *item;
1514 struct btrfs_key item_key;
1515 int ret;
1516
1517 node = btrfs_get_or_create_delayed_node(dir);
1518 if (IS_ERR(node))
1519 return PTR_ERR(node);
1520
1521 item_key.objectid = btrfs_ino(dir);
1522 btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
1523 item_key.offset = index;
1524
1525 ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
1526 if (!ret)
1527 goto end;
1528
1529 item = btrfs_alloc_delayed_item(0);
1530 if (!item) {
1531 ret = -ENOMEM;
1532 goto end;
1533 }
1534
1535 item->key = item_key;
1536
1537 ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
1538 /*
1539 * we reserved enough space when we started a new transaction,
1540 * so a metadata reservation failure is impossible here.
1541 */
1542 BUG_ON(ret);
1543
1544 mutex_lock(&node->mutex);
1545 ret = __btrfs_add_delayed_deletion_item(node, item);
1546 if (unlikely(ret)) {
1547 printk(KERN_ERR "err add delayed dir index item (index: %llu) "
1548 "into the deletion tree of the delayed node "
1549 "(root id: %llu, inode id: %llu, errno: %d)\n",
1550 index, node->root->objectid, node->inode_id,
1551 ret);
1552 BUG();
1553 }
1554 mutex_unlock(&node->mutex);
1555 end:
1556 btrfs_release_delayed_node(node);
1557 return ret;
1558 }
1559
1560 int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1561 {
1562 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1563
1564 if (!delayed_node)
1565 return -ENOENT;
1566
1567 /*
1568 * Since we hold the i_mutex of this directory, no new directory index
1569 * can be added to the delayed node and index_cnt cannot be updated
1570 * while we are here, so we needn't lock the delayed node.
1571 */
1572 if (!delayed_node->index_cnt) {
1573 btrfs_release_delayed_node(delayed_node);
1574 return -EINVAL;
1575 }
1576
1577 BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1578 btrfs_release_delayed_node(delayed_node);
1579 return 0;
1580 }
1581
1582 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
1583 struct list_head *del_list)
1584 {
1585 struct btrfs_delayed_node *delayed_node;
1586 struct btrfs_delayed_item *item;
1587
1588 delayed_node = btrfs_get_delayed_node(inode);
1589 if (!delayed_node)
1590 return;
1591
1592 mutex_lock(&delayed_node->mutex);
1593 item = __btrfs_first_delayed_insertion_item(delayed_node);
1594 while (item) {
1595 atomic_inc(&item->refs);
1596 list_add_tail(&item->readdir_list, ins_list);
1597 item = __btrfs_next_delayed_item(item);
1598 }
1599
1600 item = __btrfs_first_delayed_deletion_item(delayed_node);
1601 while (item) {
1602 atomic_inc(&item->refs);
1603 list_add_tail(&item->readdir_list, del_list);
1604 item = __btrfs_next_delayed_item(item);
1605 }
1606 mutex_unlock(&delayed_node->mutex);
1607 /*
1608 * This delayed node is still cached in the btrfs inode, so refs
1609 * must be > 1 now, and we needn't check whether it is going to be
1610 * freed or not.
1611 *
1612 * Besides that, this function is used to read the dir; we do not
1613 * insert/delete delayed items during this period, so we also needn't
1614 * requeue or dequeue this delayed node.
1615 */
1616 atomic_dec(&delayed_node->refs);
1617 }
1618
1619 void btrfs_put_delayed_items(struct list_head *ins_list,
1620 struct list_head *del_list)
1621 {
1622 struct btrfs_delayed_item *curr, *next;
1623
1624 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1625 list_del(&curr->readdir_list);
1626 if (atomic_dec_and_test(&curr->refs))
1627 kfree(curr);
1628 }
1629
1630 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1631 list_del(&curr->readdir_list);
1632 if (atomic_dec_and_test(&curr->refs))
1633 kfree(curr);
1634 }
1635 }
1636
1637 int btrfs_should_delete_dir_index(struct list_head *del_list,
1638 u64 index)
1639 {
1640 struct btrfs_delayed_item *curr, *next;
1641 int ret;
1642
1643 if (list_empty(del_list))
1644 return 0;
1645
1646 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1647 if (curr->key.offset > index)
1648 break;
1649
1650 list_del(&curr->readdir_list);
1651 ret = (curr->key.offset == index);
1652
1653 if (atomic_dec_and_test(&curr->refs))
1654 kfree(curr);
1655
1656 if (ret)
1657 return 1;
1658 else
1659 continue;
1660 }
1661 return 0;
1662 }
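
/*
 * For illustration (hypothetical offsets): with del_list holding
 * delayed deletions at indexes 5 and 9, a query for index 7 consumes
 * the entry at 5 (no match), stops at 9 and returns 0, so the entry at
 * 7 is emitted by readdir; a following query for index 9 returns 1 and
 * that entry is skipped.
 */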
1663
1664 /*
1665 * btrfs_readdir_delayed_dir_index - read the dir info stored in the
1666 * delayed tree
1667 */
1668 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1669 struct list_head *ins_list)
1670 {
1671 struct btrfs_dir_item *di;
1672 struct btrfs_delayed_item *curr, *next;
1673 struct btrfs_key location;
1674 char *name;
1675 int name_len;
1676 int over = 0;
1677 unsigned char d_type;
1678
1679 if (list_empty(ins_list))
1680 return 0;
1681
1682 /*
1683 * Changing the data of the delayed items is impossible, so
1684 * we needn't lock them. And since we hold the i_mutex of the
1685 * directory, nobody can delete any directory index now.
1686 */
1687 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1688 list_del(&curr->readdir_list);
1689
1690 if (curr->key.offset < ctx->pos) {
1691 if (atomic_dec_and_test(&curr->refs))
1692 kfree(curr);
1693 continue;
1694 }
1695
1696 ctx->pos = curr->key.offset;
1697
1698 di = (struct btrfs_dir_item *)curr->data;
1699 name = (char *)(di + 1);
1700 name_len = btrfs_stack_dir_name_len(di);
1701
1702 d_type = btrfs_filetype_table[di->type];
1703 btrfs_disk_key_to_cpu(&location, &di->location);
1704
1705 over = !dir_emit(ctx, name, name_len,
1706 location.objectid, d_type);
1707
1708 if (atomic_dec_and_test(&curr->refs))
1709 kfree(curr);
1710
1711 if (over)
1712 return 1;
1713 }
1714 return 0;
1715 }
1716
1717 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1718 struct btrfs_inode_item *inode_item,
1719 struct inode *inode)
1720 {
1721 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1722 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1723 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1724 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1725 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1726 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1727 btrfs_set_stack_inode_generation(inode_item,
1728 BTRFS_I(inode)->generation);
1729 btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1730 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1731 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1732 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1733 btrfs_set_stack_inode_block_group(inode_item, 0);
1734
1735 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1736 inode->i_atime.tv_sec);
1737 btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
1738 inode->i_atime.tv_nsec);
1739
1740 btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
1741 inode->i_mtime.tv_sec);
1742 btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
1743 inode->i_mtime.tv_nsec);
1744
1745 btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
1746 inode->i_ctime.tv_sec);
1747 btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
1748 inode->i_ctime.tv_nsec);
1749 }
1750
1751 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1752 {
1753 struct btrfs_delayed_node *delayed_node;
1754 struct btrfs_inode_item *inode_item;
1755 struct btrfs_timespec *tspec;
1756
1757 delayed_node = btrfs_get_delayed_node(inode);
1758 if (!delayed_node)
1759 return -ENOENT;
1760
1761 mutex_lock(&delayed_node->mutex);
1762 if (!delayed_node->inode_dirty) {
1763 mutex_unlock(&delayed_node->mutex);
1764 btrfs_release_delayed_node(delayed_node);
1765 return -ENOENT;
1766 }
1767
1768 inode_item = &delayed_node->inode_item;
1769
1770 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1771 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1772 btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1773 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1774 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1775 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1776 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1777 inode->i_version = btrfs_stack_inode_sequence(inode_item);
1778 inode->i_rdev = 0;
1779 *rdev = btrfs_stack_inode_rdev(inode_item);
1780 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1781
1782 tspec = btrfs_inode_atime(inode_item);
1783 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
1784 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1785
1786 tspec = btrfs_inode_mtime(inode_item);
1787 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
1788 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1789
1790 tspec = btrfs_inode_ctime(inode_item);
1791 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
1792 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1793
1794 inode->i_generation = BTRFS_I(inode)->generation;
1795 BTRFS_I(inode)->index_cnt = (u64)-1;
1796
1797 mutex_unlock(&delayed_node->mutex);
1798 btrfs_release_delayed_node(delayed_node);
1799 return 0;
1800 }
1801
1802 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1803 struct btrfs_root *root, struct inode *inode)
1804 {
1805 struct btrfs_delayed_node *delayed_node;
1806 int ret = 0;
1807
1808 delayed_node = btrfs_get_or_create_delayed_node(inode);
1809 if (IS_ERR(delayed_node))
1810 return PTR_ERR(delayed_node);
1811
1812 mutex_lock(&delayed_node->mutex);
1813 if (delayed_node->inode_dirty) {
1814 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1815 goto release_node;
1816 }
1817
1818 ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1819 delayed_node);
1820 if (ret)
1821 goto release_node;
1822
1823 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1824 delayed_node->inode_dirty = 1;
1825 delayed_node->count++;
1826 atomic_inc(&root->fs_info->delayed_root->items);
1827 release_node:
1828 mutex_unlock(&delayed_node->mutex);
1829 btrfs_release_delayed_node(delayed_node);
1830 return ret;
1831 }
1832
1833 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1834 {
1835 struct btrfs_root *root = delayed_node->root;
1836 struct btrfs_delayed_item *curr_item, *prev_item;
1837
1838 mutex_lock(&delayed_node->mutex);
1839 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1840 while (curr_item) {
1841 btrfs_delayed_item_release_metadata(root, curr_item);
1842 prev_item = curr_item;
1843 curr_item = __btrfs_next_delayed_item(prev_item);
1844 btrfs_release_delayed_item(prev_item);
1845 }
1846
1847 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1848 while (curr_item) {
1849 btrfs_delayed_item_release_metadata(root, curr_item);
1850 prev_item = curr_item;
1851 curr_item = __btrfs_next_delayed_item(prev_item);
1852 btrfs_release_delayed_item(prev_item);
1853 }
1854
1855 if (delayed_node->inode_dirty) {
1856 btrfs_delayed_inode_release_metadata(root, delayed_node);
1857 btrfs_release_delayed_inode(delayed_node);
1858 }
1859 mutex_unlock(&delayed_node->mutex);
1860 }
1861
1862 void btrfs_kill_delayed_inode_items(struct inode *inode)
1863 {
1864 struct btrfs_delayed_node *delayed_node;
1865
1866 delayed_node = btrfs_get_delayed_node(inode);
1867 if (!delayed_node)
1868 return;
1869
1870 __btrfs_kill_delayed_node(delayed_node);
1871 btrfs_release_delayed_node(delayed_node);
1872 }
1873
1874 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1875 {
1876 u64 inode_id = 0;
1877 struct btrfs_delayed_node *delayed_nodes[8];
1878 int i, n;
1879
1880 while (1) {
1881 spin_lock(&root->inode_lock);
1882 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1883 (void **)delayed_nodes, inode_id,
1884 ARRAY_SIZE(delayed_nodes));
1885 if (!n) {
1886 spin_unlock(&root->inode_lock);
1887 break;
1888 }
1889
1890 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1891
1892 for (i = 0; i < n; i++)
1893 atomic_inc(&delayed_nodes[i]->refs);
1894 spin_unlock(&root->inode_lock);
1895
1896 for (i = 0; i < n; i++) {
1897 __btrfs_kill_delayed_node(delayed_nodes[i]);
1898 btrfs_release_delayed_node(delayed_nodes[i]);
1899 }
1900 }
1901 }
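
/*
 * Note on the loop above: radix_tree_gang_lookup() returns up to 8
 * nodes starting at inode_id, and setting inode_id to the last found
 * id + 1 pages through the tree. For example (hypothetical ids), a
 * first pass returning ids 257..264 makes the next lookup start at
 * 265.
 */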
1902
1903 void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1904 {
1905 struct btrfs_delayed_root *delayed_root;
1906 struct btrfs_delayed_node *curr_node, *prev_node;
1907
1908 delayed_root = btrfs_get_delayed_root(root);
1909
1910 curr_node = btrfs_first_delayed_node(delayed_root);
1911 while (curr_node) {
1912 __btrfs_kill_delayed_node(curr_node);
1913
1914 prev_node = curr_node;
1915 curr_node = btrfs_next_delayed_node(curr_node);
1916 btrfs_release_delayed_node(prev_node);
1917 }
1918 }
1919