/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/random.h>
#include <linux/prefetch.h>

/* Keylists */

void bch_keylist_copy(struct keylist *dest, struct keylist *src)
{
	*dest = *src;

	if (src->list == src->d) {
		size_t n = (uint64_t *) src->top - src->d;

		dest->top = (struct bkey *) &dest->d[n];
		dest->list = dest->d;
	}
}

int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
	unsigned oldsize = (uint64_t *) l->top - l->list;
	unsigned newsize = oldsize + 2 + nptrs;
	uint64_t *new;

	/* The journalling code doesn't handle the case where the keys to insert
	 * are bigger than an empty write: If we just return -ENOMEM here,
	 * bio_insert() and bio_invalidate() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new = krealloc(l->list == l->d ? NULL : l->list,
		       sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new)
		return -ENOMEM;

	if (l->list == l->d)
		memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);

	l->list = new;
	l->top = (struct bkey *) (&l->list[oldsize]);

	return 0;
}

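/*
 * Growth example: if KEYLIST_INLINE is 16, a keylist holding 14 u64s that
 * needs room for a 2-pointer key asks for 14 + 2 + 2 == 18 u64s, which
 * rounds up to 32 - so the inline array gets copied into a 32 u64 heap
 * allocation.
 */
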
struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->bottom;

	if (k == l->top)
		return NULL;

	/* Walk forwards to the key just before l->top */
	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}

/* Pointer validation */

bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
{
	unsigned i;

	if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
		goto bad;

	if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (!KEY_SIZE(k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			/* The pointed-to extent must fit within one bucket */
			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				goto bad;
		}

	return false;
bad:
	cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k));
	return true;
}

bool bch_ptr_bad(struct btree *b, const struct bkey *k)
{
	struct bucket *g;
	unsigned i, stale;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(b, k))
		return true;

	if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(b->c, k, i)) {
			g = PTR_BUCKET(b->c, k, i);
			stale = ptr_stale(b->c, k, i);

			btree_bug_on(stale > 96, b,
				     "key too stale: %i, need_gc %u",
				     stale, b->c->need_gc);

			btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
				     b, "stale dirty pointer");

			if (stale)
				return true;

#ifdef CONFIG_BCACHE_EDEBUG
			if (!mutex_trylock(&b->c->bucket_lock))
				continue;

			if (b->level) {
				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto bug;
			} else {
				if (g->prio == BTREE_PRIO)
					goto bug;

				if (KEY_DIRTY(k) &&
				    b->c->gc_mark_valid &&
				    GC_MARK(g) != GC_MARK_DIRTY)
					goto bug;
			}
			mutex_unlock(&b->c->bucket_lock);
#endif
		}

	return false;
#ifdef CONFIG_BCACHE_EDEBUG
bug:
	mutex_unlock(&b->c->bucket_lock);
	btree_bug(b, "inconsistent pointer %s: bucket %li pin %i "
		  "prio %i gen %i last_gc %i mark %llu gc_gen %i", pkey(k),
		  PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
#endif
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}

/* Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
{
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	if (KEY_PTRS(l) != KEY_PTRS(r) ||
	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}

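/*
 * Example: if l covers offsets 0..8 of an inode (KEY_OFFSET(l) == 8, so l
 * compares equal to START_KEY(r)) and r covers 8..16 of the same inode,
 * with r's pointers starting KEY_SIZE(l) sectors after l's in the same
 * bucket, the extents are contiguous on disk and the merged l ends up with
 * KEY_OFFSET 16 and KEY_SIZE 16.
 */
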
/* Binary tree stuff for auxiliary search trees */

static unsigned inorder_next(unsigned j, unsigned size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}

/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for size up to somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;

	j  ^= 1 << (b - 1);
	j <<= 1;
	j  |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

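/*
 * Worked example: size == 8 (a full tree of nodes 1..7) gives
 * extra == (8 - 4) << 1 == 8. For the root j == 1: b == 1, shift == 2, and
 * j goes 1 -> 0 -> 0 -> 1 -> 4, i.e. the root is visited fourth in an
 * inorder walk. The leftmost leaf j == 4 (b == 3, shift == 0) maps to 1,
 * and __inorder_to_tree() below inverts both.
 */
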
static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
	unsigned shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j  |= roundup_pow_of_two(size) >> shift;

	return j;
}

static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned size = 2;
	     size < 65536000;
	     size++) {
		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
			       done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
				      unsigned offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

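/*
 * E.g. with BSET_CACHELINE == 128, cacheline_to_bkey(t, 2, 3) points at
 * the u64 located 2 * 128 + 3 * 8 == 280 bytes into the bset.
 */
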
static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bkey *k)
{
	return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
#ifdef CONFIG_X86_64
	asm("shrd %[shift],%[high],%[low]"
	    : [low] "+Rm" (low)
	    : [high] "R" (high),
	      [shift] "ci" (shift));
#else
	low >>= shift;
	low  |= (high << 1) << (63U - shift);
#endif
	return low;
}

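/*
 * shrd128() returns the low 64 bits of the 128-bit quantity high:low
 * shifted right by shift (0..63): e.g. shrd128(0x1, 0, 4) == 1ULL << 60.
 */
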
static inline unsigned bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

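/*
 * Reading the above: f->exponent is a bit offset into the 128 bits formed
 * by k's header word (which holds KEY_INODE) and k->low. exponent >> 6
 * selects the starting 64-bit word and exponent & 63 the bit within it;
 * the mantissa is the BKEY_MANTISSA_BITS bits starting there.
 */
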
static void make_bfloat(struct bset_tree *t, unsigned j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? node(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */
	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}

static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	if (t != b->sets) {
		unsigned j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->sets + MAX_BSETS)
		t++->size = 0;
}

static void bset_build_unwritten_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;

	bset_alloc_tree(b, t);

	if (t->tree != b->sets->tree + bset_tree_space(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
		t->size = 1;
	}
}

static void bset_build_written_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;
	struct bkey *k = t->data->start;
	unsigned j, cacheline = 1;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
			bkey_to_cacheline(t, end(t->data)),
			b->sets->tree + bset_tree_space(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) != cacheline)
			k = bkey_next(k);

		t->prev[j] = bkey_u64s(k);
		k = bkey_next(k);
		cacheline++;
		t->tree[j].m = bkey_to_cacheline_offset(k);
	}

	while (bkey_next(k) != end(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}

void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		if (k < end(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == end(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}

void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
{
	struct bset_tree *t = &b->sets[b->nsets];
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/* k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	if (j < t->size &&
	    table_to_bkey(t, j) <= k)
		j++;

	/* Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(k);
		}
	}

	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */
	for (k = table_to_bkey(t, t->size - 1);
	     k != end(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] = bkey_to_cacheline_offset(k);
			t->size++;
		}
}

void bch_bset_init_next(struct btree *b)
{
	struct bset *i = write_block(b);

	if (i != b->sets[0].data) {
		b->sets[++b->nsets].data = i;
		i->seq = b->sets[0].data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic	= bset_magic(b->c);
	i->version	= 0;
	i->keys		= 0;

	bset_build_unwritten_tree(b);
}

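/*
 * All bsets in a node share the seq of the first bset (randomly generated
 * when the node is initialized); on read, a bset whose seq doesn't match
 * the first one's is leftover data from a previous use of the bucket and
 * is ignored.
 */
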
struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct btree *b,
						     struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;

	BUG_ON(!b->nsets &&
	       t->size < bkey_to_cacheline(t, end(t->data)));

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct btree *b,
						struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;

	do {
		unsigned p = n << 4;
		/* p is the node 4 levels below n; zero it if that's past the
		 * end of the tree, so we always prefetch something valid
		 */
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		/*
		 * n = (f->mantissa > bfloat_mantissa())
		 *	? j * 2
		 *	: j * 2 + 1;
		 *
		 * We need to subtract 1 from f->mantissa for the sign bit trick
		 * to work - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				? j * 2
				: j * 2 + 1;
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = end(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}

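/*
 * Why the mantissa comparison above is branchless: make_bfloat() stored
 * bfloat_mantissa(m) - 1 in f->mantissa, so the unsigned subtraction
 * f->mantissa - bfloat_mantissa(search, f) has its top bit set exactly
 * when the search key's mantissa is >= the node's - shifting right by 31
 * then yields the "+1" that steers the descent to the right child.
 */
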
struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 * * The set is too small to have a search tree, so we just do a linear
	 *   search over the whole set.
	 * * The set is the one we're currently inserting into; keeping a full
	 *   auxiliary search tree up to date would be too expensive, so we
	 *   use a much simpler lookup table to do a binary search -
	 *   bset_search_write_set().
	 * * Or we use the auxiliary search tree we constructed earlier -
	 *   bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = end(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */
		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return end(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(b, t, search);
	} else
		i = bset_search_write_set(b, t, search);

#ifdef CONFIG_BCACHE_EDEBUG
	BUG_ON(bset_written(b, t) &&
	       i.l != t->data->start &&
	       bkey_cmp(tree_to_prev_bkey(t,
		  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
			search) > 0);

	BUG_ON(i.r != end(t->data) &&
	       bkey_cmp(i.r, search) <= 0);
#endif

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}

/* Btree iterator */

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
				   struct bkey *search, struct bset_tree *start)
{
	struct bkey *ret = NULL;
	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

	for (; start <= &b->sets[b->nsets]; start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, end(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	struct btree_iter_set unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, unused, btree_iter_cmp);
		else
			heap_sift(iter, 0, btree_iter_cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
{
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, search);
	return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
}

/* Mergesort */

static void btree_sort_fixup(struct btree_iter *iter)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;
		struct bkey *k;

		if (iter->used > 2 &&
		    btree_iter_cmp(i[0], i[1]))
			i++;

		for (k = i->k;
		     k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
		     k = bkey_next(k))
			if (top->k > i->k)
				__bch_cut_front(top->k, k);
			else if (KEY_SIZE(k))
				bch_cut_back(&START_KEY(k), top->k);

		if (top->k < i->k || k == i->k)
			break;

		heap_sift(iter, i - top, btree_iter_cmp);
	}
}

static void btree_mergesort(struct btree *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	struct bkey *k, *last = NULL;
	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	while (!btree_iter_end(iter)) {
		if (fixup && !b->level)
			btree_sort_fixup(iter);

		k = bch_btree_iter_next(iter);
		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (b->level ||
			   !bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys", out->keys);
	bch_check_key_order(b, out);
}

static void __btree_sort(struct btree *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup)
{
	uint64_t start_time;
	bool remove_stale = !b->written;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
						     order);
	if (!out) {
		mutex_lock(&b->c->sort_lock);
		out = b->c->sort;
		order = ilog2(bucket_pages(b->c));
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, remove_stale);
	b->nsets = start;

	if (!fixup && !start && b->written)
		bch_btree_verify(b, out);

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		out->magic	= bset_magic(b->c);
		out->seq	= b->sets[0].data->seq;
		out->version	= b->sets[0].data->version;
		swap(out, b->sets[0].data);

		if (b->c->sort == b->sets[0].data)
			b->c->sort = out;
	} else {
		b->sets[start].data->keys = out->keys;
		memcpy(b->sets[start].data->start, out->start,
		       (void *) end(out) - (void *) out->start);
	}

	if (out == b->c->sort)
		mutex_unlock(&b->c->sort_lock);
	else
		free_pages((unsigned long) out, order);

	if (b->written)
		bset_build_written_tree(b);

	if (!start) {
		spin_lock(&b->c->sort_time_lock);
		time_stats_update(&b->c->sort_time, start_time);
		spin_unlock(&b->c->sort_time_lock);
	}
}

void bch_btree_sort_partial(struct btree *b, unsigned start)
{
	size_t oldsize = 0, order = b->page_order, keys = 0;
	struct btree_iter iter;
	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);

	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
	       (b->sets[b->nsets].size || b->nsets));

	if (b->written)
		oldsize = bch_count_data(b);

	if (start) {
		unsigned i;

		for (i = start; i <= b->nsets; i++)
			keys += b->sets[i].data->keys;

		order = roundup_pow_of_two(__set_bytes(b->sets->data,
						       keys)) / PAGE_SIZE;
		if (order)
			order = ilog2(order);
	}

	__btree_sort(b, &iter, start, order, false);

	EBUG_ON(b->written && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
{
	BUG_ON(!b->written);
	__btree_sort(b, iter, 0, b->page_order, true);
}

void bch_btree_sort_into(struct btree *b, struct btree *new)
{
	uint64_t start_time = local_clock();

	struct btree_iter iter;
	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->sets->data, &iter, false, true);

	spin_lock(&b->c->sort_time_lock);
	time_stats_update(&b->c->sort_time, start_time);
	spin_unlock(&b->c->sort_time_lock);

	bkey_copy_key(&new->key, &b->key);
	new->sets->size = 0;
}

void bch_btree_sort_lazy(struct btree *b)
{
	if (b->nsets) {
		unsigned i, j, keys = 0, total;

		for (i = 0; i <= b->nsets; i++)
			keys += b->sets[i].data->keys;

		total = keys;

		for (j = 0; j < b->nsets; j++) {
			if (keys * 2 < total ||
			    keys < 1000) {
				bch_btree_sort_partial(b, j);
				return;
			}

			keys -= b->sets[j].data->keys;
		}

		/* Must sort if b->nsets == 3 or we'll overflow */
		if (b->nsets >= (MAX_BSETS - 1) - b->level) {
			bch_btree_sort(b);
			return;
		}
	}

	bset_build_written_tree(b);
}

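/*
 * The heuristic above: find the first set j such that the keys in sets
 * j..nsets amount to less than half of the node (or under 1000 keys) and
 * resort only that tail, so sort work stays proportional to the amount of
 * newly inserted data.
 */
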
/* Sysfs stuff */

struct bset_stats {
	size_t nodes;
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	size_t floats, failed;
};

static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
				struct bset_stats *stats)
{
	struct bkey *k;
	unsigned i;

	stats->nodes++;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->sets[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}

	if (b->level) {
		struct btree_iter iter;

		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
			int ret = btree(bset_stats, k, b, op, stats);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct btree_op op;
	struct bset_stats t;
	int ret;

	bch_btree_op_init_stack(&op);
	memset(&t, 0, sizeof(struct bset_stats));

	ret = btree_root(bset_stats, c, &op, &t);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			t.nodes,
			t.sets_written, t.sets_unwritten,
			t.bytes_written, t.bytes_unwritten,
			t.floats, t.failed);
}