fs/gfs2/rgrp.c
1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/slab.h>
13 #include <linux/spinlock.h>
14 #include <linux/completion.h>
15 #include <linux/buffer_head.h>
16 #include <linux/fs.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/prefetch.h>
19 #include <linux/blkdev.h>
20 #include <linux/rbtree.h>
21 #include <linux/random.h>
22
23 #include "gfs2.h"
24 #include "incore.h"
25 #include "glock.h"
26 #include "glops.h"
27 #include "lops.h"
28 #include "meta_io.h"
29 #include "quota.h"
30 #include "rgrp.h"
31 #include "super.h"
32 #include "trans.h"
33 #include "util.h"
34 #include "log.h"
35 #include "inode.h"
36 #include "trace_gfs2.h"
37
38 #define BFITNOENT ((u32)~0)
39 #define NO_BLOCK ((u64)~0)
40
41 #if BITS_PER_LONG == 32
42 #define LBITMASK (0x55555555UL)
43 #define LBITSKIP55 (0x55555555UL)
44 #define LBITSKIP00 (0x00000000UL)
45 #else
46 #define LBITMASK (0x5555555555555555UL)
47 #define LBITSKIP55 (0x5555555555555555UL)
48 #define LBITSKIP00 (0x0000000000000000UL)
49 #endif
50
51 /*
52 * These routines are used by the resource group routines (rgrp.c)
53 * to keep track of block allocation. Each block is represented by two
54 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55 *
56 * 0 = Free
57 * 1 = Used (not metadata)
58 * 2 = Unlinked (still in use) inode
59 * 3 = Used (metadata)
60 */
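
/*
 * Illustrative sketch (userspace demo, not kernel code): how four two-bit
 * block states pack into one byte under the encoding described above.
 * DEMO_NBBY, DEMO_BIT_SIZE and DEMO_BIT_MASK mirror GFS2_NBBY,
 * GFS2_BIT_SIZE and GFS2_BIT_MASK from gfs2_ondisk.h; the names here are
 * made up for the demonstration.
 */
#include <stdio.h>

#define DEMO_NBBY	4	/* blocks per byte */
#define DEMO_BIT_SIZE	2	/* bits per block state */
#define DEMO_BIT_MASK	0x3

static unsigned char demo_get_state(const unsigned char *map, unsigned int blk)
{
	unsigned int bit = (blk % DEMO_NBBY) * DEMO_BIT_SIZE;

	return (map[blk / DEMO_NBBY] >> bit) & DEMO_BIT_MASK;
}

int main(void)
{
	/* 0x93 = 10 01 00 11 binary: blocks 0..3 hold states 3, 0, 1, 2 */
	unsigned char map[1] = { 0x93 };
	unsigned int blk;

	for (blk = 0; blk < 4; blk++)
		printf("block %u -> state %u\n", blk, demo_get_state(map, blk));
	return 0;
}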
61
62 struct gfs2_extent {
63 struct gfs2_rbm rbm;
64 u32 len;
65 };
66
67 static const char valid_change[16] = {
68 /* current */
69 /* n */ 0, 1, 1, 1,
70 /* e */ 1, 0, 0, 0,
71 /* w */ 0, 0, 0, 1,
72 1, 0, 0, 0
73 };
74
75 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
76 const struct gfs2_inode *ip, bool nowrap,
77 const struct gfs2_alloc_parms *ap);
78
79
80 /**
81 * gfs2_setbit - Set a bit in the bitmaps
82 * @rbm: The position of the bit to set
83 * @do_clone: Also set the clone bitmap, if it exists
84 * @new_state: the new state of the block
85 *
86 */
87
88 static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
89 unsigned char new_state)
90 {
91 unsigned char *byte1, *byte2, *end, cur_state;
92 struct gfs2_bitmap *bi = rbm_bi(rbm);
93 unsigned int buflen = bi->bi_len;
94 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
95
96 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
97 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
98
99 BUG_ON(byte1 >= end);
100
101 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
102
103 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
104 pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
105 rbm->offset, cur_state, new_state);
106 pr_warn("rgrp=0x%llx bi_start=0x%x\n",
107 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
108 pr_warn("bi_offset=0x%x bi_len=0x%x\n",
109 bi->bi_offset, bi->bi_len);
110 dump_stack();
111 gfs2_consist_rgrpd(rbm->rgd);
112 return;
113 }
114 *byte1 ^= (cur_state ^ new_state) << bit;
115
116 if (do_clone && bi->bi_clone) {
117 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
118 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
119 *byte2 ^= (cur_state ^ new_state) << bit;
120 }
121 }
122
123 /**
124 * gfs2_testbit - test a bit in the bitmaps
125 * @rbm: The bit to test
126 *
127 * Returns: The two bit block state of the requested bit
128 */
129
130 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
131 {
132 struct gfs2_bitmap *bi = rbm_bi(rbm);
133 const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
134 const u8 *byte;
135 unsigned int bit;
136
137 byte = buffer + (rbm->offset / GFS2_NBBY);
138 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
139
140 return (*byte >> bit) & GFS2_BIT_MASK;
141 }
142
143 /**
144 * gfs2_bit_search
145 * @ptr: Pointer to bitmap data
146 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
147 * @state: The state we are searching for
148 *
149 * We xor the bitmap data with a pattern which is the bitwise opposite
150 * of what we are looking for; this gives rise to a pattern of ones
151 * wherever there is a match. Since we have two bits per entry, we
152 * take this pattern, shift it down by one place and then and it with
153 * the original. All the even bit positions (0,2,4, etc) then represent
154 * successful matches, so we mask with 0x55555..... to remove the unwanted
155 * odd bit positions.
156 *
157 * This allows searching of a whole u64 at once (32 blocks) with a
158 * single test (on 64 bit arches).
159 */
160
161 static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
162 {
163 u64 tmp;
164 static const u64 search[] = {
165 [0] = 0xffffffffffffffffULL,
166 [1] = 0xaaaaaaaaaaaaaaaaULL,
167 [2] = 0x5555555555555555ULL,
168 [3] = 0x0000000000000000ULL,
169 };
170 tmp = le64_to_cpu(*ptr) ^ search[state];
171 tmp &= (tmp >> 1);
172 tmp &= mask;
173 return tmp;
174 }
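
/*
 * Illustrative sketch (userspace demo, not kernel code): the XOR/shift/
 * mask trick described above, applied to one sample 64-bit word. search[]
 * is chosen so that a matching two-bit slot XORs to binary 11; after
 * tmp &= tmp >> 1 and masking with 0x5555..., each match leaves a single
 * one bit in the low bit of its slot.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t demo_bit_search(uint64_t word, uint64_t mask, unsigned state)
{
	static const uint64_t search[] = {
		0xffffffffffffffffULL,	/* state 0 (free) */
		0xaaaaaaaaaaaaaaaaULL,	/* state 1 */
		0x5555555555555555ULL,	/* state 2 */
		0x0000000000000000ULL,	/* state 3 */
	};
	uint64_t tmp = word ^ search[state];

	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}

int main(void)
{
	/* slots from bit 0: states 3, 0, 1, 2; all higher slots are free */
	uint64_t word = 0x93;
	uint64_t hits = demo_bit_search(word, 0x5555555555555555ULL, 0);

	/* expect bit 2 (slot 1) plus bits 8, 10, 12, ... -> 0x...5504 */
	printf("free matches: 0x%llx\n", (unsigned long long)hits);
	return 0;
}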
175
176 /**
177 * rs_cmp - multi-block reservation range compare
178 * @blk: absolute file system block number of the new reservation
179 * @len: number of blocks in the new reservation
180 * @rs: existing reservation to compare against
181 *
182 * returns: 1 if the block range is beyond the reach of the reservation
183 * -1 if the block range is before the start of the reservation
184 * 0 if the block range overlaps with the reservation
185 */
186 static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
187 {
188 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
189
190 if (blk >= startblk + rs->rs_free)
191 return 1;
192 if (blk + len - 1 < startblk)
193 return -1;
194 return 0;
195 }
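
/*
 * Illustrative sketch (userspace demo): the interval test rs_cmp()
 * implements, with plain numbers standing in for a gfs2_blkreserv. The
 * reservation below covers blocks 100..109 (start 100, 10 blocks free).
 */
#include <stdio.h>
#include <stdint.h>

static int demo_rs_cmp(uint64_t blk, uint32_t len,
		       uint64_t rs_start, uint32_t rs_free)
{
	if (blk >= rs_start + rs_free)
		return 1;	/* candidate lies beyond the reservation */
	if (blk + len - 1 < rs_start)
		return -1;	/* candidate ends before it starts */
	return 0;		/* candidate overlaps it */
}

int main(void)
{
	printf("%d\n", demo_rs_cmp(90, 5, 100, 10));	/* -1: 90..94 */
	printf("%d\n", demo_rs_cmp(95, 10, 100, 10));	/*  0: 95..104 */
	printf("%d\n", demo_rs_cmp(110, 4, 100, 10));	/*  1: 110..113 */
	return 0;
}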
196
197 /**
198 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
199 * a block in a given allocation state.
200 * @buf: the buffer that holds the bitmaps
201 * @len: the length (in bytes) of the buffer
202 * @goal: start search at this block's bit-pair (within @buffer)
203 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
204 *
205 * Scope of @goal and returned block number is only within this bitmap buffer,
206 * not entire rgrp or filesystem. @buf will be offset from the actual
207 * beginning of a bitmap block buffer, skipping any header structures, but
208 * headers are always a multiple of 64 bits long so that the buffer is
209 * always aligned to a 64 bit boundary.
210 *
211 * The size of the buffer is in bytes, but it is assumed that it is
212 * always ok to read a complete multiple of 64 bits at the end
213 * of the block in case the end is not aligned to a natural boundary.
214 *
215 * Returns: the block number (bitmap buffer scope) that was found
216 */
217
218 static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
219 u32 goal, u8 state)
220 {
221 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
222 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
223 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
224 u64 tmp;
225 u64 mask = 0x5555555555555555ULL;
226 u32 bit;
227
228 /* Mask off bits we don't care about at the start of the search */
229 mask <<= spoint;
230 tmp = gfs2_bit_search(ptr, mask, state);
231 ptr++;
232 while(tmp == 0 && ptr < end) {
233 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
234 ptr++;
235 }
236 /* Mask off any bits which are more than len bytes from the start */
237 if (ptr == end && (len & (sizeof(u64) - 1)))
238 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
239 /* Didn't find anything, so return */
240 if (tmp == 0)
241 return BFITNOENT;
242 ptr--;
243 bit = __ffs64(tmp);
244 bit /= 2; /* two bits per entry in the bitmap */
245 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
246 }
247
248 /**
249 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
250 * @rbm: The rbm with rgd already set correctly
251 * @block: The block number (filesystem relative)
252 *
253 * This sets the bi and offset members of an rbm based on a
254 * resource group and a filesystem relative block number. The
255 * resource group must be set in the rbm on entry, the bi and
256 * offset members will be set by this function.
257 *
258 * Returns: 0 on success, or an error code
259 */
260
261 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
262 {
263 u64 rblock = block - rbm->rgd->rd_data0;
264
265 if (WARN_ON_ONCE(rblock > UINT_MAX))
266 return -EINVAL;
267 if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
268 return -E2BIG;
269
270 rbm->bii = 0;
271 rbm->offset = (u32)(rblock);
272 /* Check if the block is within the first bitmap block */
273 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
274 return 0;
275
276 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
277 rbm->offset += (sizeof(struct gfs2_rgrp) -
278 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
279 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
280 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
281 return 0;
282 }
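
/*
 * Illustrative sketch (userspace demo): the address arithmetic above with
 * made-up geometry. Assumes 4KiB blocks, a 24-byte gfs2_meta_header and a
 * 128-byte gfs2_rgrp; both struct sizes are assumptions for illustration
 * only. The header block holds fewer bitmap bytes than the others, so
 * offsets past it are adjusted as if the header were only a meta_header.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MH_SIZE	24	/* assumed sizeof(struct gfs2_meta_header) */
#define DEMO_RG_SIZE	128	/* assumed sizeof(struct gfs2_rgrp) */

static void demo_from_block(uint32_t rblock)
{
	const uint32_t first_blocks = (4096 - DEMO_RG_SIZE) * 4; /* 15872 */
	const uint32_t per_bitmap = (4096 - DEMO_MH_SIZE) * 4;	 /* 16288 */
	uint32_t bii = 0, offset = rblock;

	if (offset >= first_blocks) {
		offset += (DEMO_RG_SIZE - DEMO_MH_SIZE) * 4;
		bii = offset / per_bitmap;
		offset -= bii * per_bitmap;
	}
	printf("rblock %u -> bitmap %u, offset %u\n", rblock, bii, offset);
}

int main(void)
{
	demo_from_block(100);	/* bitmap 0, offset 100 */
	demo_from_block(20000);	/* bitmap 1, offset 4128 */
	return 0;
}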
283
284 /**
285 * gfs2_rbm_incr - increment an rbm structure
286 * @rbm: The rbm with rgd already set correctly
287 *
288 * This function takes an existing rbm structure and increments it to the next
289 * viable block offset.
290 *
291 * Returns: If incrementing the offset would cause the rbm to go past the
292 * end of the rgrp, true is returned, otherwise false.
293 *
294 */
295
296 static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
297 {
298 if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
299 rbm->offset++;
300 return false;
301 }
302 if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
303 return true;
304
305 rbm->offset = 0;
306 rbm->bii++;
307 return false;
308 }
309
310 /**
311 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
312 * @rbm: Position to search (value/result)
313 * @n_unaligned: Number of unaligned blocks to check
314 * @len: Decremented for each block found (terminate on zero)
315 *
316 * Returns: true if a non-free block is encountered
317 */
318
319 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
320 {
321 u32 n;
322 u8 res;
323
324 for (n = 0; n < n_unaligned; n++) {
325 res = gfs2_testbit(rbm);
326 if (res != GFS2_BLKST_FREE)
327 return true;
328 (*len)--;
329 if (*len == 0)
330 return true;
331 if (gfs2_rbm_incr(rbm))
332 return true;
333 }
334
335 return false;
336 }
337
338 /**
339 * gfs2_free_extlen - Return extent length of free blocks
340 * @rrbm: Starting position
341 * @len: Max length to check
342 *
343 * Starting at the block specified by the rbm, see how many free blocks
344 * there are, not reading more than len blocks ahead. This can be done
345 * using memchr_inv when the blocks are byte aligned, but has to be done
346 * on a block by block basis in case of unaligned blocks. Also this
347 * function can cope with bitmap boundaries (although it must stop on
348 * a resource group boundary)
349 *
350 * Returns: Number of free blocks in the extent
351 */
352
353 static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
354 {
355 struct gfs2_rbm rbm = *rrbm;
356 u32 n_unaligned = rbm.offset & 3;
357 u32 size = len;
358 u32 bytes;
359 u32 chunk_size;
360 u8 *ptr, *start, *end;
361 u64 block;
362 struct gfs2_bitmap *bi;
363
364 if (n_unaligned &&
365 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
366 goto out;
367
368 n_unaligned = len & 3;
369 /* Start is now byte aligned */
370 while (len > 3) {
371 bi = rbm_bi(&rbm);
372 start = bi->bi_bh->b_data;
373 if (bi->bi_clone)
374 start = bi->bi_clone;
375 end = start + bi->bi_bh->b_size;
376 start += bi->bi_offset;
377 BUG_ON(rbm.offset & 3);
378 start += (rbm.offset / GFS2_NBBY);
379 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
380 ptr = memchr_inv(start, 0, bytes);
381 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
382 chunk_size *= GFS2_NBBY;
383 BUG_ON(len < chunk_size);
384 len -= chunk_size;
385 block = gfs2_rbm_to_block(&rbm);
386 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
387 n_unaligned = 0;
388 break;
389 }
390 if (ptr) {
391 n_unaligned = 3;
392 break;
393 }
394 n_unaligned = len & 3;
395 }
396
397 /* Deal with any bits left over at the end */
398 if (n_unaligned)
399 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
400 out:
401 return size - len;
402 }
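
/*
 * Illustrative sketch (userspace demo): a byte-at-a-time stand-in for the
 * kernel's memchr_inv() as used above. A zero byte encodes four
 * consecutive free blocks, so whole bytes can be skipped until the first
 * byte that is not entirely free.
 */
#include <stdio.h>
#include <stddef.h>

static const unsigned char *demo_memchr_inv(const unsigned char *p, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != 0)
			return p + i;
	return NULL;	/* every byte was zero: all blocks free */
}

int main(void)
{
	/* 3 fully free bytes (12 blocks), then a byte with a used block */
	unsigned char bitmap[] = { 0x00, 0x00, 0x00, 0x04, 0x00 };
	const unsigned char *ptr = demo_memchr_inv(bitmap, sizeof(bitmap));
	size_t free_bytes = ptr ? (size_t)(ptr - bitmap) : sizeof(bitmap);

	printf("at least %zu free blocks before the first used one\n",
	       free_bytes * 4);
	return 0;
}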
403
404 /**
405 * gfs2_bitcount - count the number of bits in a certain state
406 * @rgd: the resource group descriptor
407 * @buffer: the buffer that holds the bitmaps
408 * @buflen: the length (in bytes) of the buffer
409 * @state: the state of the block we're looking for
410 *
411 * Returns: The number of bits
412 */
413
414 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
415 unsigned int buflen, u8 state)
416 {
417 const u8 *byte = buffer;
418 const u8 *end = buffer + buflen;
419 const u8 state1 = state << 2;
420 const u8 state2 = state << 4;
421 const u8 state3 = state << 6;
422 u32 count = 0;
423
424 for (; byte < end; byte++) {
425 if (((*byte) & 0x03) == state)
426 count++;
427 if (((*byte) & 0x0C) == state1)
428 count++;
429 if (((*byte) & 0x30) == state2)
430 count++;
431 if (((*byte) & 0xC0) == state3)
432 count++;
433 }
434
435 return count;
436 }
437
438 /**
439 * gfs2_rgrp_verify - Verify that a resource group is consistent
440 * @rgd: the rgrp
441 *
442 */
443
444 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
445 {
446 struct gfs2_sbd *sdp = rgd->rd_sbd;
447 struct gfs2_bitmap *bi = NULL;
448 u32 length = rgd->rd_length;
449 u32 count[4], tmp;
450 int buf, x;
451
452 memset(count, 0, 4 * sizeof(u32));
453
454 /* Count # blocks in each of 4 possible allocation states */
455 for (buf = 0; buf < length; buf++) {
456 bi = rgd->rd_bits + buf;
457 for (x = 0; x < 4; x++)
458 count[x] += gfs2_bitcount(rgd,
459 bi->bi_bh->b_data +
460 bi->bi_offset,
461 bi->bi_len, x);
462 }
463
464 if (count[0] != rgd->rd_free) {
465 if (gfs2_consist_rgrpd(rgd))
466 fs_err(sdp, "free data mismatch: %u != %u\n",
467 count[0], rgd->rd_free);
468 return;
469 }
470
471 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
472 if (count[1] != tmp) {
473 if (gfs2_consist_rgrpd(rgd))
474 fs_err(sdp, "used data mismatch: %u != %u\n",
475 count[1], tmp);
476 return;
477 }
478
479 if (count[2] + count[3] != rgd->rd_dinodes) {
480 if (gfs2_consist_rgrpd(rgd))
481 fs_err(sdp, "used metadata mismatch: %u != %u\n",
482 count[2] + count[3], rgd->rd_dinodes);
483 return;
484 }
485 }
486
487 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
488 {
489 u64 first = rgd->rd_data0;
490 u64 last = first + rgd->rd_data;
491 return first <= block && block < last;
492 }
493
494 /**
495 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
496 * @sdp: The GFS2 superblock
497 * @blk: The data block number
498 * @exact: True if this needs to be an exact match
499 *
500 * Returns: The resource group, or NULL if not found
501 */
502
503 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
504 {
505 struct rb_node *n, *next;
506 struct gfs2_rgrpd *cur;
507
508 spin_lock(&sdp->sd_rindex_spin);
509 n = sdp->sd_rindex_tree.rb_node;
510 while (n) {
511 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
512 next = NULL;
513 if (blk < cur->rd_addr)
514 next = n->rb_left;
515 else if (blk >= cur->rd_data0 + cur->rd_data)
516 next = n->rb_right;
517 if (next == NULL) {
518 spin_unlock(&sdp->sd_rindex_spin);
519 if (exact) {
520 if (blk < cur->rd_addr)
521 return NULL;
522 if (blk >= cur->rd_data0 + cur->rd_data)
523 return NULL;
524 }
525 return cur;
526 }
527 n = next;
528 }
529 spin_unlock(&sdp->sd_rindex_spin);
530
531 return NULL;
532 }
533
534 /**
535 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
536 * @sdp: The GFS2 superblock
537 *
538 * Returns: The first rgrp in the filesystem
539 */
540
541 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
542 {
543 const struct rb_node *n;
544 struct gfs2_rgrpd *rgd;
545
546 spin_lock(&sdp->sd_rindex_spin);
547 n = rb_first(&sdp->sd_rindex_tree);
548 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
549 spin_unlock(&sdp->sd_rindex_spin);
550
551 return rgd;
552 }
553
554 /**
555 * gfs2_rgrpd_get_next - get the next RG
556 * @rgd: the resource group descriptor
557 *
558 * Returns: The next rgrp
559 */
560
561 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
562 {
563 struct gfs2_sbd *sdp = rgd->rd_sbd;
564 const struct rb_node *n;
565
566 spin_lock(&sdp->sd_rindex_spin);
567 n = rb_next(&rgd->rd_node);
568 if (n == NULL)
569 n = rb_first(&sdp->sd_rindex_tree);
570
571 if (unlikely(&rgd->rd_node == n)) {
572 spin_unlock(&sdp->sd_rindex_spin);
573 return NULL;
574 }
575 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
576 spin_unlock(&sdp->sd_rindex_spin);
577 return rgd;
578 }
579
580 void check_and_update_goal(struct gfs2_inode *ip)
581 {
582 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
583 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
584 ip->i_goal = ip->i_no_addr;
585 }
586
587 void gfs2_free_clones(struct gfs2_rgrpd *rgd)
588 {
589 int x;
590
591 for (x = 0; x < rgd->rd_length; x++) {
592 struct gfs2_bitmap *bi = rgd->rd_bits + x;
593 kfree(bi->bi_clone);
594 bi->bi_clone = NULL;
595 }
596 }
597
598 /**
599 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
600 * plus a quota allocation data structure, if necessary
601 * @ip: the inode for this reservation
602 */
603 int gfs2_rsqa_alloc(struct gfs2_inode *ip)
604 {
605 return gfs2_qa_alloc(ip);
606 }
607
608 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
609 {
610 gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
611 (unsigned long long)rs->rs_inum,
612 (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
613 rs->rs_rbm.offset, rs->rs_free);
614 }
615
616 /**
617 * __rs_deltree - remove a multi-block reservation from the rgd tree
618 * @rs: The reservation to remove
619 *
620 */
621 static void __rs_deltree(struct gfs2_blkreserv *rs)
622 {
623 struct gfs2_rgrpd *rgd;
624
625 if (!gfs2_rs_active(rs))
626 return;
627
628 rgd = rs->rs_rbm.rgd;
629 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
630 rb_erase(&rs->rs_node, &rgd->rd_rstree);
631 RB_CLEAR_NODE(&rs->rs_node);
632
633 if (rs->rs_free) {
634 struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
635
636 /* return reserved blocks to the rgrp */
637 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
638 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
639 /* The rgrp extent failure point is likely not to increase;
640 it will only do so if the freed blocks are somehow
641 contiguous with a span of free blocks that follows. Still,
642 it will force the number to be recalculated later. */
643 rgd->rd_extfail_pt += rs->rs_free;
644 rs->rs_free = 0;
645 clear_bit(GBF_FULL, &bi->bi_flags);
646 }
647 }
648
649 /**
650 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
651 * @rs: The reservation to remove
652 *
653 */
654 void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
655 {
656 struct gfs2_rgrpd *rgd;
657
658 rgd = rs->rs_rbm.rgd;
659 if (rgd) {
660 spin_lock(&rgd->rd_rsspin);
661 __rs_deltree(rs);
662 spin_unlock(&rgd->rd_rsspin);
663 }
664 }
665
666 /**
667 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
668 * @ip: The inode for this reservation
669 * @wcount: The inode's write count, or NULL
670 *
671 */
672 void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
673 {
674 down_write(&ip->i_rw_mutex);
675 if ((wcount == NULL) || (atomic_read(wcount) <= 1)) {
676 gfs2_rs_deltree(&ip->i_res);
677 BUG_ON(ip->i_res.rs_free);
678 }
679 up_write(&ip->i_rw_mutex);
680 gfs2_qa_delete(ip, wcount);
681 }
682
683 /**
684 * return_all_reservations - return all reserved blocks back to the rgrp.
685 * @rgd: the rgrp that needs its space back
686 *
687 * We previously reserved a bunch of blocks for allocation. Now we need to
688 * give them back. This leaves the reservation structures intact, but removes
689 * all of their corresponding "no-fly zones".
690 */
691 static void return_all_reservations(struct gfs2_rgrpd *rgd)
692 {
693 struct rb_node *n;
694 struct gfs2_blkreserv *rs;
695
696 spin_lock(&rgd->rd_rsspin);
697 while ((n = rb_first(&rgd->rd_rstree))) {
698 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
699 __rs_deltree(rs);
700 }
701 spin_unlock(&rgd->rd_rsspin);
702 }
703
704 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
705 {
706 struct rb_node *n;
707 struct gfs2_rgrpd *rgd;
708 struct gfs2_glock *gl;
709
710 while ((n = rb_first(&sdp->sd_rindex_tree))) {
711 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
712 gl = rgd->rd_gl;
713
714 rb_erase(n, &sdp->sd_rindex_tree);
715
716 if (gl) {
717 spin_lock(&gl->gl_lockref.lock);
718 gl->gl_object = NULL;
719 spin_unlock(&gl->gl_lockref.lock);
720 gfs2_glock_add_to_lru(gl);
721 gfs2_glock_put(gl);
722 }
723
724 gfs2_free_clones(rgd);
725 kfree(rgd->rd_bits);
726 return_all_reservations(rgd);
727 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
728 }
729 }
730
731 static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
732 {
733 pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
734 pr_info("ri_length = %u\n", rgd->rd_length);
735 pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
736 pr_info("ri_data = %u\n", rgd->rd_data);
737 pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
738 }
739
740 /**
741 * compute_bitstructs - Compute the bitmap sizes
742 * @rgd: The resource group descriptor
743 *
744 * Calculates bitmap descriptors, one for each block that contains bitmap data
745 *
746 * Returns: errno
747 */
748
749 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
750 {
751 struct gfs2_sbd *sdp = rgd->rd_sbd;
752 struct gfs2_bitmap *bi;
753 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
754 u32 bytes_left, bytes;
755 int x;
756
757 if (!length)
758 return -EINVAL;
759
760 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
761 if (!rgd->rd_bits)
762 return -ENOMEM;
763
764 bytes_left = rgd->rd_bitbytes;
765
766 for (x = 0; x < length; x++) {
767 bi = rgd->rd_bits + x;
768
769 bi->bi_flags = 0;
770 /* small rgrp; bitmap stored completely in header block */
771 if (length == 1) {
772 bytes = bytes_left;
773 bi->bi_offset = sizeof(struct gfs2_rgrp);
774 bi->bi_start = 0;
775 bi->bi_len = bytes;
776 bi->bi_blocks = bytes * GFS2_NBBY;
777 /* header block */
778 } else if (x == 0) {
779 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
780 bi->bi_offset = sizeof(struct gfs2_rgrp);
781 bi->bi_start = 0;
782 bi->bi_len = bytes;
783 bi->bi_blocks = bytes * GFS2_NBBY;
784 /* last block */
785 } else if (x + 1 == length) {
786 bytes = bytes_left;
787 bi->bi_offset = sizeof(struct gfs2_meta_header);
788 bi->bi_start = rgd->rd_bitbytes - bytes_left;
789 bi->bi_len = bytes;
790 bi->bi_blocks = bytes * GFS2_NBBY;
791 /* other blocks */
792 } else {
793 bytes = sdp->sd_sb.sb_bsize -
794 sizeof(struct gfs2_meta_header);
795 bi->bi_offset = sizeof(struct gfs2_meta_header);
796 bi->bi_start = rgd->rd_bitbytes - bytes_left;
797 bi->bi_len = bytes;
798 bi->bi_blocks = bytes * GFS2_NBBY;
799 }
800
801 bytes_left -= bytes;
802 }
803
804 if (bytes_left) {
805 gfs2_consist_rgrpd(rgd);
806 return -EIO;
807 }
808 bi = rgd->rd_bits + (length - 1);
809 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
810 if (gfs2_consist_rgrpd(rgd)) {
811 gfs2_rindex_print(rgd);
812 fs_err(sdp, "start=%u len=%u offset=%u\n",
813 bi->bi_start, bi->bi_len, bi->bi_offset);
814 }
815 return -EIO;
816 }
817
818 return 0;
819 }
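
/*
 * Illustrative sketch (userspace demo): the three layout cases computed
 * above (header block, middle blocks, last block) for an rgrp whose
 * bitmap spans three 4KiB blocks. The 24-byte and 128-byte struct sizes
 * are assumptions for the demonstration, as is rd_bitbytes = 10000.
 */
#include <stdio.h>

int main(void)
{
	const unsigned bsize = 4096, mh = 24, rg = 128;
	const unsigned bitbytes = 10000;	/* stand-in for rd_bitbytes */
	unsigned x, start = 0;

	for (x = 0; x < 3; x++) {
		unsigned left = bitbytes - start;
		unsigned off = (x == 0) ? rg : mh;	/* header vs. meta */
		unsigned bytes = bsize - off;

		if (bytes > left)	/* last block takes the remainder */
			bytes = left;
		printf("bi[%u]: offset=%u start=%u len=%u blocks=%u\n",
		       x, off, start, bytes, bytes * 4);
		start += bytes;
	}
	return 0;
}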
820
821 /**
822 * gfs2_ri_total - Total up the file system space, according to the rindex.
823 * @sdp: the filesystem
824 *
825 */
826 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
827 {
828 u64 total_data = 0;
829 struct inode *inode = sdp->sd_rindex;
830 struct gfs2_inode *ip = GFS2_I(inode);
831 char buf[sizeof(struct gfs2_rindex)];
832 int error, rgrps;
833
834 for (rgrps = 0;; rgrps++) {
835 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
836
837 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
838 break;
839 error = gfs2_internal_read(ip, buf, &pos,
840 sizeof(struct gfs2_rindex));
841 if (error != sizeof(struct gfs2_rindex))
842 break;
843 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
844 }
845 return total_data;
846 }
847
848 static int rgd_insert(struct gfs2_rgrpd *rgd)
849 {
850 struct gfs2_sbd *sdp = rgd->rd_sbd;
851 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
852
853 /* Figure out where to put new node */
854 while (*newn) {
855 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
856 rd_node);
857
858 parent = *newn;
859 if (rgd->rd_addr < cur->rd_addr)
860 newn = &((*newn)->rb_left);
861 else if (rgd->rd_addr > cur->rd_addr)
862 newn = &((*newn)->rb_right);
863 else
864 return -EEXIST;
865 }
866
867 rb_link_node(&rgd->rd_node, parent, newn);
868 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
869 sdp->sd_rgrps++;
870 return 0;
871 }
872
873 /**
874 * read_rindex_entry - Pull in a new resource index entry from the disk
875 * @ip: Pointer to the rindex inode
876 *
877 * Returns: 0 on success, > 0 on EOF, error code otherwise
878 */
879
880 static int read_rindex_entry(struct gfs2_inode *ip)
881 {
882 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
883 const unsigned bsize = sdp->sd_sb.sb_bsize;
884 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
885 struct gfs2_rindex buf;
886 int error;
887 struct gfs2_rgrpd *rgd;
888
889 if (pos >= i_size_read(&ip->i_inode))
890 return 1;
891
892 error = gfs2_internal_read(ip, (char *)&buf, &pos,
893 sizeof(struct gfs2_rindex));
894
895 if (error != sizeof(struct gfs2_rindex))
896 return (error == 0) ? 1 : error;
897
898 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
899 error = -ENOMEM;
900 if (!rgd)
901 return error;
902
903 rgd->rd_sbd = sdp;
904 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
905 rgd->rd_length = be32_to_cpu(buf.ri_length);
906 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
907 rgd->rd_data = be32_to_cpu(buf.ri_data);
908 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
909 spin_lock_init(&rgd->rd_rsspin);
910
911 error = compute_bitstructs(rgd);
912 if (error)
913 goto fail;
914
915 error = gfs2_glock_get(sdp, rgd->rd_addr,
916 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
917 if (error)
918 goto fail;
919
920 rgd->rd_gl->gl_object = rgd;
921 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
922 rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
923 rgd->rd_length) * bsize) - 1;
924 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
925 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
926 if (rgd->rd_data > sdp->sd_max_rg_data)
927 sdp->sd_max_rg_data = rgd->rd_data;
928 spin_lock(&sdp->sd_rindex_spin);
929 error = rgd_insert(rgd);
930 spin_unlock(&sdp->sd_rindex_spin);
931 if (!error)
932 return 0;
933
934 error = 0; /* someone else read in the rgrp; free it and ignore it */
935 gfs2_glock_put(rgd->rd_gl);
936
937 fail:
938 kfree(rgd->rd_bits);
939 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
940 return error;
941 }
942
943 /**
944 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
945 * @sdp: the GFS2 superblock
946 *
947 * The purpose of this function is to select a subset of the resource groups
948 * and mark them as PREFERRED. We do it in such a way that each node prefers
949 * to use a unique set of rgrps to minimize glock contention.
950 */
951 static void set_rgrp_preferences(struct gfs2_sbd *sdp)
952 {
953 struct gfs2_rgrpd *rgd, *first;
954 int i;
955
956 /* Skip an initial number of rgrps, based on this node's journal ID.
957 That should start each node out on its own set. */
958 rgd = gfs2_rgrpd_get_first(sdp);
959 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
960 rgd = gfs2_rgrpd_get_next(rgd);
961 first = rgd;
962
963 do {
964 rgd->rd_flags |= GFS2_RDF_PREFERRED;
965 for (i = 0; i < sdp->sd_journals; i++) {
966 rgd = gfs2_rgrpd_get_next(rgd);
967 if (!rgd || rgd == first)
968 break;
969 }
970 } while (rgd && rgd != first);
971 }
972
973 /**
974 * gfs2_ri_update - Pull in a new resource index from the disk
975 * @ip: pointer to the rindex inode
976 *
977 * Returns: 0 on successful update, error code otherwise
978 */
979
980 static int gfs2_ri_update(struct gfs2_inode *ip)
981 {
982 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
983 int error;
984
985 do {
986 error = read_rindex_entry(ip);
987 } while (error == 0);
988
989 if (error < 0)
990 return error;
991
992 set_rgrp_preferences(sdp);
993
994 sdp->sd_rindex_uptodate = 1;
995 return 0;
996 }
997
998 /**
999 * gfs2_rindex_update - Update the rindex if required
1000 * @sdp: The GFS2 superblock
1001 *
1002 * We grab a lock on the rindex inode to make sure that it doesn't
1003 * change whilst we are performing an operation. We keep this lock
1004 * for quite long periods of time compared to other locks. This
1005 * doesn't matter, since it is shared and it is very, very rarely
1006 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
1007 *
1008 * This makes sure that we're using the latest copy of the resource index
1009 * special file, which might have been updated if someone expanded the
1010 * filesystem (via the gfs2_grow utility), which adds new resource groups.
1011 *
1012 * Returns: 0 on success, error code otherwise
1013 */
1014
1015 int gfs2_rindex_update(struct gfs2_sbd *sdp)
1016 {
1017 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1018 struct gfs2_glock *gl = ip->i_gl;
1019 struct gfs2_holder ri_gh;
1020 int error = 0;
1021 int unlock_required = 0;
1022
1023 /* Read new copy from disk if we don't have the latest */
1024 if (!sdp->sd_rindex_uptodate) {
1025 if (!gfs2_glock_is_locked_by_me(gl)) {
1026 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1027 if (error)
1028 return error;
1029 unlock_required = 1;
1030 }
1031 if (!sdp->sd_rindex_uptodate)
1032 error = gfs2_ri_update(ip);
1033 if (unlock_required)
1034 gfs2_glock_dq_uninit(&ri_gh);
1035 }
1036
1037 return error;
1038 }
1039
1040 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1041 {
1042 const struct gfs2_rgrp *str = buf;
1043 u32 rg_flags;
1044
1045 rg_flags = be32_to_cpu(str->rg_flags);
1046 rg_flags &= ~GFS2_RDF_MASK;
1047 rgd->rd_flags &= GFS2_RDF_MASK;
1048 rgd->rd_flags |= rg_flags;
1049 rgd->rd_free = be32_to_cpu(str->rg_free);
1050 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1051 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1052 }
1053
1054 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1055 {
1056 struct gfs2_rgrp *str = buf;
1057
1058 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1059 str->rg_free = cpu_to_be32(rgd->rd_free);
1060 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1061 str->__pad = cpu_to_be32(0);
1062 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1063 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1064 }
1065
1066 static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1067 {
1068 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1069 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1070
1071 if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
1072 rgl->rl_dinodes != str->rg_dinodes ||
1073 rgl->rl_igeneration != str->rg_igeneration)
1074 return 0;
1075 return 1;
1076 }
1077
1078 static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1079 {
1080 const struct gfs2_rgrp *str = buf;
1081
1082 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1083 rgl->rl_flags = str->rg_flags;
1084 rgl->rl_free = str->rg_free;
1085 rgl->rl_dinodes = str->rg_dinodes;
1086 rgl->rl_igeneration = str->rg_igeneration;
1087 rgl->__pad = 0UL;
1088 }
1089
1090 static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
1091 {
1092 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1093 u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
1094 rgl->rl_unlinked = cpu_to_be32(unlinked);
1095 }
1096
1097 static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1098 {
1099 struct gfs2_bitmap *bi;
1100 const u32 length = rgd->rd_length;
1101 const u8 *buffer = NULL;
1102 u32 i, goal, count = 0;
1103
1104 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1105 goal = 0;
1106 buffer = bi->bi_bh->b_data + bi->bi_offset;
1107 WARN_ON(!buffer_uptodate(bi->bi_bh));
1108 while (goal < bi->bi_len * GFS2_NBBY) {
1109 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
1110 GFS2_BLKST_UNLINKED);
1111 if (goal == BFITNOENT)
1112 break;
1113 count++;
1114 goal++;
1115 }
1116 }
1117
1118 return count;
1119 }
1120
1121
1122 /**
1123 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
1124 * @rgd: the struct gfs2_rgrpd describing the RG to read in
1125 *
1126 * Read in all of a Resource Group's header and bitmap blocks.
1127 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1128 *
1129 * Returns: errno
1130 */
1131
1132 static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
1133 {
1134 struct gfs2_sbd *sdp = rgd->rd_sbd;
1135 struct gfs2_glock *gl = rgd->rd_gl;
1136 unsigned int length = rgd->rd_length;
1137 struct gfs2_bitmap *bi;
1138 unsigned int x, y;
1139 int error;
1140
1141 if (rgd->rd_bits[0].bi_bh != NULL)
1142 return 0;
1143
1144 for (x = 0; x < length; x++) {
1145 bi = rgd->rd_bits + x;
1146 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
1147 if (error)
1148 goto fail;
1149 }
1150
1151 for (y = length; y--;) {
1152 bi = rgd->rd_bits + y;
1153 error = gfs2_meta_wait(sdp, bi->bi_bh);
1154 if (error)
1155 goto fail;
1156 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1157 GFS2_METATYPE_RG)) {
1158 error = -EIO;
1159 goto fail;
1160 }
1161 }
1162
1163 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
1164 for (x = 0; x < length; x++)
1165 clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
1166 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1167 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1168 rgd->rd_free_clone = rgd->rd_free;
1169 /* max out the rgrp allocation failure point */
1170 rgd->rd_extfail_pt = rgd->rd_free;
1171 }
1172 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1173 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1174 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1175 rgd->rd_bits[0].bi_bh->b_data);
1176 }
1177 else if (sdp->sd_args.ar_rgrplvb) {
1178 if (!gfs2_rgrp_lvb_valid(rgd)){
1179 gfs2_consist_rgrpd(rgd);
1180 error = -EIO;
1181 goto fail;
1182 }
1183 if (rgd->rd_rgl->rl_unlinked == 0)
1184 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1185 }
1186 return 0;
1187
1188 fail:
1189 while (x--) {
1190 bi = rgd->rd_bits + x;
1191 brelse(bi->bi_bh);
1192 bi->bi_bh = NULL;
1193 gfs2_assert_warn(sdp, !bi->bi_clone);
1194 }
1195
1196 return error;
1197 }
1198
1199 static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1200 {
1201 u32 rl_flags;
1202
1203 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1204 return 0;
1205
1206 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1207 return gfs2_rgrp_bh_get(rgd);
1208
1209 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1210 rl_flags &= ~GFS2_RDF_MASK;
1211 rgd->rd_flags &= GFS2_RDF_MASK;
1212 rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1213 if (rgd->rd_rgl->rl_unlinked == 0)
1214 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1215 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1216 rgd->rd_free_clone = rgd->rd_free;
1217 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1218 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1219 return 0;
1220 }
1221
1222 int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1223 {
1224 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1225 struct gfs2_sbd *sdp = rgd->rd_sbd;
1226
1227 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1228 return 0;
1229 return gfs2_rgrp_bh_get(rgd);
1230 }
1231
1232 /**
1233 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1234 * @rgd: The resource group
1235 *
1236 */
1237
1238 void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
1239 {
1240 int x, length = rgd->rd_length;
1241
1242 for (x = 0; x < length; x++) {
1243 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1244 if (bi->bi_bh) {
1245 brelse(bi->bi_bh);
1246 bi->bi_bh = NULL;
1247 }
1248 }
1249
1250 }
1251
1252 /**
1253 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1254 * @gh: The glock holder for the resource group
1255 *
1256 */
1257
1258 void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1259 {
1260 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1261 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1262 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1263
1264 if (rgd && demote_requested)
1265 gfs2_rgrp_brelse(rgd);
1266 }
1267
1268 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1269 struct buffer_head *bh,
1270 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1271 {
1272 struct super_block *sb = sdp->sd_vfs;
1273 u64 blk;
1274 sector_t start = 0;
1275 sector_t nr_blks = 0;
1276 int rv;
1277 unsigned int x;
1278 u32 trimmed = 0;
1279 u8 diff;
1280
1281 for (x = 0; x < bi->bi_len; x++) {
1282 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1283 clone += bi->bi_offset;
1284 clone += x;
1285 if (bh) {
1286 const u8 *orig = bh->b_data + bi->bi_offset + x;
1287 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1288 } else {
1289 diff = ~(*clone | (*clone >> 1));
1290 }
1291 diff &= 0x55;
1292 if (diff == 0)
1293 continue;
1294 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1295 while(diff) {
1296 if (diff & 1) {
1297 if (nr_blks == 0)
1298 goto start_new_extent;
1299 if ((start + nr_blks) != blk) {
1300 if (nr_blks >= minlen) {
1301 rv = sb_issue_discard(sb,
1302 start, nr_blks,
1303 GFP_NOFS, 0);
1304 if (rv)
1305 goto fail;
1306 trimmed += nr_blks;
1307 }
1308 nr_blks = 0;
1309 start_new_extent:
1310 start = blk;
1311 }
1312 nr_blks++;
1313 }
1314 diff >>= 2;
1315 blk++;
1316 }
1317 }
1318 if (nr_blks >= minlen) {
1319 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1320 if (rv)
1321 goto fail;
1322 trimmed += nr_blks;
1323 }
1324 if (ptrimmed)
1325 *ptrimmed = trimmed;
1326 return 0;
1327
1328 fail:
1329 if (sdp->sd_args.ar_discard)
1330 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
1331 sdp->sd_args.ar_discard = 0;
1332 return -EIO;
1333 }
1334
1335 /**
1336 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1337 * @filp: Any file on the filesystem
1338 * @argp: Pointer to the arguments (also used to pass result)
1339 *
1340 * Returns: 0 on success, otherwise error code
1341 */
1342
1343 int gfs2_fitrim(struct file *filp, void __user *argp)
1344 {
1345 struct inode *inode = file_inode(filp);
1346 struct gfs2_sbd *sdp = GFS2_SB(inode);
1347 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1348 struct buffer_head *bh;
1349 struct gfs2_rgrpd *rgd;
1350 struct gfs2_rgrpd *rgd_end;
1351 struct gfs2_holder gh;
1352 struct fstrim_range r;
1353 int ret = 0;
1354 u64 amt;
1355 u64 trimmed = 0;
1356 u64 start, end, minlen;
1357 unsigned int x;
1358 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1359
1360 if (!capable(CAP_SYS_ADMIN))
1361 return -EPERM;
1362
1363 if (!blk_queue_discard(q))
1364 return -EOPNOTSUPP;
1365
1366 if (copy_from_user(&r, argp, sizeof(r)))
1367 return -EFAULT;
1368
1369 ret = gfs2_rindex_update(sdp);
1370 if (ret)
1371 return ret;
1372
1373 start = r.start >> bs_shift;
1374 end = start + (r.len >> bs_shift);
1375 minlen = max_t(u64, r.minlen,
1376 q->limits.discard_granularity) >> bs_shift;
1377
1378 if (end <= start || minlen > sdp->sd_max_rg_data)
1379 return -EINVAL;
1380
1381 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1382 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1383
1384 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1385 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1386 return -EINVAL; /* start is beyond the end of the fs */
1387
1388 while (1) {
1389
1390 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1391 if (ret)
1392 goto out;
1393
1394 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1395 /* Trim each bitmap in the rgrp */
1396 for (x = 0; x < rgd->rd_length; x++) {
1397 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1398 ret = gfs2_rgrp_send_discards(sdp,
1399 rgd->rd_data0, NULL, bi, minlen,
1400 &amt);
1401 if (ret) {
1402 gfs2_glock_dq_uninit(&gh);
1403 goto out;
1404 }
1405 trimmed += amt;
1406 }
1407
1408 /* Mark rgrp as having been trimmed */
1409 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1410 if (ret == 0) {
1411 bh = rgd->rd_bits[0].bi_bh;
1412 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1413 gfs2_trans_add_meta(rgd->rd_gl, bh);
1414 gfs2_rgrp_out(rgd, bh->b_data);
1415 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
1416 gfs2_trans_end(sdp);
1417 }
1418 }
1419 gfs2_glock_dq_uninit(&gh);
1420
1421 if (rgd == rgd_end)
1422 break;
1423
1424 rgd = gfs2_rgrpd_get_next(rgd);
1425 }
1426
1427 out:
1428 r.len = trimmed << bs_shift;
1429 if (copy_to_user(argp, &r, sizeof(r)))
1430 return -EFAULT;
1431
1432 return ret;
1433 }
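
/*
 * Illustrative sketch (userspace demo): how a tool like fstrim(8) drives
 * the handler above through the FITRIM ioctl. Any open file descriptor
 * on the mounted filesystem will do; error handling is minimal.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range r = {
		.start = 0,
		.len = (unsigned long long)-1,	/* whole filesystem */
		.minlen = 0,			/* handler raises this to the
						   device's granularity */
	};
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &r) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)r.len);
	close(fd);
	return 0;
}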
1434
1435 /**
1436 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1437 * @ip: the inode structure
1438 *
1439 */
1440 static void rs_insert(struct gfs2_inode *ip)
1441 {
1442 struct rb_node **newn, *parent = NULL;
1443 int rc;
1444 struct gfs2_blkreserv *rs = &ip->i_res;
1445 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
1446 u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1447
1448 BUG_ON(gfs2_rs_active(rs));
1449
1450 spin_lock(&rgd->rd_rsspin);
1451 newn = &rgd->rd_rstree.rb_node;
1452 while (*newn) {
1453 struct gfs2_blkreserv *cur =
1454 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1455
1456 parent = *newn;
1457 rc = rs_cmp(fsblock, rs->rs_free, cur);
1458 if (rc > 0)
1459 newn = &((*newn)->rb_right);
1460 else if (rc < 0)
1461 newn = &((*newn)->rb_left);
1462 else {
1463 spin_unlock(&rgd->rd_rsspin);
1464 WARN_ON(1);
1465 return;
1466 }
1467 }
1468
1469 rb_link_node(&rs->rs_node, parent, newn);
1470 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1471
1472 /* Do our rgrp accounting for the reservation */
1473 rgd->rd_reserved += rs->rs_free; /* blocks reserved */
1474 spin_unlock(&rgd->rd_rsspin);
1475 trace_gfs2_rs(rs, TRACE_RS_INSERT);
1476 }
1477
1478 /**
1479 * rg_mblk_search - find a group of multiple free blocks to form a reservation
1480 * @rgd: the resource group descriptor
1481 * @ip: pointer to the inode for which we're reserving blocks
1482 * @ap: the allocation parameters
1483 *
1484 */
1485
1486 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1487 const struct gfs2_alloc_parms *ap)
1488 {
1489 struct gfs2_rbm rbm = { .rgd = rgd, };
1490 u64 goal;
1491 struct gfs2_blkreserv *rs = &ip->i_res;
1492 u32 extlen;
1493 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1494 int ret;
1495 struct inode *inode = &ip->i_inode;
1496
1497 if (S_ISDIR(inode->i_mode))
1498 extlen = 1;
1499 else {
1500 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
1501 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1502 }
1503 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
1504 return;
1505
1506 /* Find bitmap block that contains bits for goal block */
1507 if (rgrp_contains_block(rgd, ip->i_goal))
1508 goal = ip->i_goal;
1509 else
1510 goal = rgd->rd_last_alloc + rgd->rd_data0;
1511
1512 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1513 return;
1514
1515 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true, ap);
1516 if (ret == 0) {
1517 rs->rs_rbm = rbm;
1518 rs->rs_free = extlen;
1519 rs->rs_inum = ip->i_no_addr;
1520 rs_insert(ip);
1521 } else {
1522 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1523 rgd->rd_last_alloc = 0;
1524 }
1525 }
1526
1527 /**
1528 * gfs2_next_unreserved_block - Return next block that is not reserved
1529 * @rgd: The resource group
1530 * @block: The starting block
1531 * @length: The required length
1532 * @ip: Ignore any reservations for this inode
1533 *
1534 * If the block does not appear in any reservation, then return the
1535 * block number unchanged. If it does appear in the reservation, then
1536 * keep looking through the tree of reservations in order to find the
1537 * first block number which is not reserved.
1538 */
1539
1540 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1541 u32 length,
1542 const struct gfs2_inode *ip)
1543 {
1544 struct gfs2_blkreserv *rs;
1545 struct rb_node *n;
1546 int rc;
1547
1548 spin_lock(&rgd->rd_rsspin);
1549 n = rgd->rd_rstree.rb_node;
1550 while (n) {
1551 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1552 rc = rs_cmp(block, length, rs);
1553 if (rc < 0)
1554 n = n->rb_left;
1555 else if (rc > 0)
1556 n = n->rb_right;
1557 else
1558 break;
1559 }
1560
1561 if (n) {
1562 while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
1563 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1564 n = n->rb_right;
1565 if (n == NULL)
1566 break;
1567 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1568 }
1569 }
1570
1571 spin_unlock(&rgd->rd_rsspin);
1572 return block;
1573 }
1574
1575 /**
1576 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1577 * @rbm: The current position in the resource group
1578 * @ip: The inode for which we are searching for blocks
1579 * @minext: The minimum extent length
1580 * @maxext: A pointer to the maximum extent structure
1581 *
1582 * This checks the current position in the rgrp to see whether there is
1583 * a reservation covering this block. If not then this function is a
1584 * no-op. If there is, then the position is moved to the end of the
1585 * contiguous reservation(s) so that we are pointing at the first
1586 * non-reserved block.
1587 *
1588 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1589 */
1590
1591 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1592 const struct gfs2_inode *ip,
1593 u32 minext,
1594 struct gfs2_extent *maxext)
1595 {
1596 u64 block = gfs2_rbm_to_block(rbm);
1597 u32 extlen = 1;
1598 u64 nblock;
1599 int ret;
1600
1601 /*
1602 * If we have a minimum extent length, then skip over any extent
1603 * which is less than the min extent length in size.
1604 */
1605 if (minext) {
1606 extlen = gfs2_free_extlen(rbm, minext);
1607 if (extlen <= maxext->len)
1608 goto fail;
1609 }
1610
1611 /*
1612 * Check the extent which has been found against the reservations
1613 * and skip if parts of it are already reserved
1614 */
1615 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
1616 if (nblock == block) {
1617 if (!minext || extlen >= minext)
1618 return 0;
1619
1620 if (extlen > maxext->len) {
1621 maxext->len = extlen;
1622 maxext->rbm = *rbm;
1623 }
1624 fail:
1625 nblock = block + extlen;
1626 }
1627 ret = gfs2_rbm_from_block(rbm, nblock);
1628 if (ret < 0)
1629 return ret;
1630 return 1;
1631 }
1632
1633 /**
1634 * gfs2_rbm_find - Look for blocks of a particular state
1635 * @rbm: Value/result starting position and final position
1636 * @state: The state which we want to find
1637 * @minext: Pointer to the requested extent length (NULL for a single block)
1638 * This is updated to be the actual reservation size.
1639 * @ip: If set, check for reservations
1640 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1641 * around until we've reached the starting point.
1642 * @ap: the allocation parameters
1643 *
1644 * Side effects:
1645 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1646 * has no free blocks in it.
1647 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1648 * has come up short on a free block search.
1649 *
1650 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1651 */
1652
1653 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1654 const struct gfs2_inode *ip, bool nowrap,
1655 const struct gfs2_alloc_parms *ap)
1656 {
1657 struct buffer_head *bh;
1658 int initial_bii;
1659 u32 initial_offset;
1660 int first_bii = rbm->bii;
1661 u32 first_offset = rbm->offset;
1662 u32 offset;
1663 u8 *buffer;
1664 int n = 0;
1665 int iters = rbm->rgd->rd_length;
1666 int ret;
1667 struct gfs2_bitmap *bi;
1668 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1669
1670 /* If we are not starting at the beginning of a bitmap, then we
1671 * need to add one to the bitmap count to ensure that we search
1672 * the starting bitmap twice.
1673 */
1674 if (rbm->offset != 0)
1675 iters++;
1676
1677 while(1) {
1678 bi = rbm_bi(rbm);
1679 if (test_bit(GBF_FULL, &bi->bi_flags) &&
1680 (state == GFS2_BLKST_FREE))
1681 goto next_bitmap;
1682
1683 bh = bi->bi_bh;
1684 buffer = bh->b_data + bi->bi_offset;
1685 WARN_ON(!buffer_uptodate(bh));
1686 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1687 buffer = bi->bi_clone + bi->bi_offset;
1688 initial_offset = rbm->offset;
1689 offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
1690 if (offset == BFITNOENT)
1691 goto bitmap_full;
1692 rbm->offset = offset;
1693 if (ip == NULL)
1694 return 0;
1695
1696 initial_bii = rbm->bii;
1697 ret = gfs2_reservation_check_and_update(rbm, ip,
1698 minext ? *minext : 0,
1699 &maxext);
1700 if (ret == 0)
1701 return 0;
1702 if (ret > 0) {
1703 n += (rbm->bii - initial_bii);
1704 goto next_iter;
1705 }
1706 if (ret == -E2BIG) {
1707 rbm->bii = 0;
1708 rbm->offset = 0;
1709 n += (rbm->bii - initial_bii);
1710 goto res_covered_end_of_rgrp;
1711 }
1712 return ret;
1713
1714 bitmap_full: /* Mark bitmap as full and fall through */
1715 if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
1716 set_bit(GBF_FULL, &bi->bi_flags);
1717
1718 next_bitmap: /* Find next bitmap in the rgrp */
1719 rbm->offset = 0;
1720 rbm->bii++;
1721 if (rbm->bii == rbm->rgd->rd_length)
1722 rbm->bii = 0;
1723 res_covered_end_of_rgrp:
1724 if ((rbm->bii == 0) && nowrap)
1725 break;
1726 n++;
1727 next_iter:
1728 if (n >= iters)
1729 break;
1730 }
1731
1732 if (minext == NULL || state != GFS2_BLKST_FREE)
1733 return -ENOSPC;
1734
1735 /* If the extent was too small, and it's smaller than the smallest
1736 to have failed before, remember for future reference that it's
1737 useless to search this rgrp again for this amount or more. */
1738 if ((first_offset == 0) && (first_bii == 0) &&
1739 (*minext < rbm->rgd->rd_extfail_pt))
1740 rbm->rgd->rd_extfail_pt = *minext;
1741
1742 /* If the maximum extent we found is big enough to fulfill the
1743 minimum requirements, use it anyway. */
1744 if (maxext.len) {
1745 *rbm = maxext.rbm;
1746 *minext = maxext.len;
1747 return 0;
1748 }
1749
1750 return -ENOSPC;
1751 }
1752
1753 /**
1754 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1755 * @rgd: The rgrp
1756 * @last_unlinked: block address of the last dinode we unlinked
1757 * @skip: block address we should explicitly not unlink
1758 *
1759 * Any unlinked but still-allocated dinodes found are queued for
1760 * deletion via the glock delete workqueue; nothing is returned.
1761 */
1762
1763 static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1764 {
1765 u64 block;
1766 struct gfs2_sbd *sdp = rgd->rd_sbd;
1767 struct gfs2_glock *gl;
1768 struct gfs2_inode *ip;
1769 int error;
1770 int found = 0;
1771 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1772
1773 while (1) {
1774 down_write(&sdp->sd_log_flush_lock);
1775 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1776 true, NULL);
1777 up_write(&sdp->sd_log_flush_lock);
1778 if (error == -ENOSPC)
1779 break;
1780 if (WARN_ON_ONCE(error))
1781 break;
1782
1783 block = gfs2_rbm_to_block(&rbm);
1784 if (gfs2_rbm_from_block(&rbm, block + 1))
1785 break;
1786 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1787 continue;
1788 if (block == skip)
1789 continue;
1790 *last_unlinked = block;
1791
1792 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
1793 if (error)
1794 continue;
1795
1796 /* If the inode is already in cache, we can ignore it here
1797 * because the existing inode disposal code will deal with
1798 * it when all refs have gone away. Accessing gl_object like
1799 * this is not safe in general. Here it is ok because we do
1800 * not dereference the pointer, and we only need an approx
1801 * answer to whether it is NULL or not.
1802 */
1803 ip = gl->gl_object;
1804
1805 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1806 gfs2_glock_put(gl);
1807 else
1808 found++;
1809
1810 /* Limit reclaim to sensible number of tasks */
1811 if (found > NR_CPUS)
1812 return;
1813 }
1814
1815 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1816 return;
1817 }
1818
1819 /**
1820 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1821 * @rgd: The rgrp in question
1822 * @loops: An indication of how picky we can be (0=very, 1=less so)
1823 *
1824 * This function uses the recently added glock statistics in order to
1825 * figure out whether a particular resource group is suffering from
1826 * contention from multiple nodes. This is done purely on the basis
1827 * of timings, since this is the only data we have to work with and
1828 * our aim here is to reject a resource group which is highly contended
1829 * but (very important) not to do this too often in order to ensure that
1830 * we do not end up introducing fragmentation by changing resource
1831 * groups when not actually required.
1832 *
1833 * The calculation is fairly simple, we want to know whether the SRTTB
1834 * (i.e. smoothed round trip time for blocking operations) to acquire
1835 * the lock for this rgrp's glock is significantly greater than the
1836 * time taken for resource groups on average. We introduce a margin in
1837 * the form of the variable @var which is computed as the sum of the two
1838 * respective variances, and multiplied by a factor depending on @loops
1839 * and whether we have a lot of data to base the decision on. This is
1840 * then tested against the square difference of the means in order to
1841 * decide whether the result is statistically significant or not.
1842 *
1843 * Returns: A boolean verdict on the congestion status
1844 */
1845
1846 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1847 {
1848 const struct gfs2_glock *gl = rgd->rd_gl;
1849 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1850 struct gfs2_lkstats *st;
1851 u64 r_dcount, l_dcount;
1852 u64 l_srttb, a_srttb = 0;
1853 s64 srttb_diff;
1854 u64 sqr_diff;
1855 u64 var;
1856 int cpu, nonzero = 0;
1857
1858 preempt_disable();
1859 for_each_present_cpu(cpu) {
1860 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
1861 if (st->stats[GFS2_LKS_SRTTB]) {
1862 a_srttb += st->stats[GFS2_LKS_SRTTB];
1863 nonzero++;
1864 }
1865 }
1866 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1867 if (nonzero)
1868 do_div(a_srttb, nonzero);
1869 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1870 var = st->stats[GFS2_LKS_SRTTVARB] +
1871 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1872 preempt_enable();
1873
1874 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1875 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1876
1877 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
1878 return false;
1879
1880 srttb_diff = a_srttb - l_srttb;
1881 sqr_diff = srttb_diff * srttb_diff;
1882
1883 var *= 2;
1884 if (l_dcount < 8 || r_dcount < 8)
1885 var *= 2;
1886 if (loops == 1)
1887 var *= 2;
1888
1889 return ((srttb_diff < 0) && (sqr_diff > var));
1890 }
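
/*
 * Illustrative sketch (userspace demo): the significance test above,
 * reduced to plain numbers. A local SRTT well above the cross-cpu
 * average, with the squared difference beating the (scaled) combined
 * variance, marks the rgrp as congested.
 */
#include <stdio.h>
#include <stdint.h>

static int demo_congested(int64_t a_srttb, int64_t l_srttb,
			  uint64_t var, int few_samples, int loops)
{
	int64_t diff = a_srttb - l_srttb;	/* negative: local is slower */
	uint64_t sqr = (uint64_t)(diff * diff);

	var *= 2;
	if (few_samples)	/* widen the margin on thin data */
		var *= 2;
	if (loops == 1)		/* and when we are being less picky */
		var *= 2;
	return (diff < 0) && (sqr > var);
}

int main(void)
{
	/* local lock is far slower than average: congested */
	printf("%d\n", demo_congested(100, 400, 10000, 0, 0));	/* 1 */
	/* slower, but inside the widened margin: not congested */
	printf("%d\n", demo_congested(100, 140, 10000, 1, 1));	/* 0 */
	return 0;
}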
1891
1892 /**
1893 * gfs2_rgrp_used_recently
1894 * @rs: The block reservation with the rgrp to test
1895 * @msecs: The time limit in milliseconds
1896 *
1897 * Returns: True if the rgrp glock has been used within the time limit
1898 */
1899 static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1900 u64 msecs)
1901 {
1902 u64 tdiff;
1903
1904 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1905 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1906
1907 return tdiff > (msecs * 1000 * 1000);
1908 }
1909
1910 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1911 {
1912 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1913 u32 skip;
1914
1915 get_random_bytes(&skip, sizeof(skip));
1916 return skip % sdp->sd_rgrps;
1917 }
1918
1919 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1920 {
1921 struct gfs2_rgrpd *rgd = *pos;
1922 struct gfs2_sbd *sdp = rgd->rd_sbd;
1923
1924 rgd = gfs2_rgrpd_get_next(rgd);
1925 if (rgd == NULL)
1926 rgd = gfs2_rgrpd_get_first(sdp);
1927 *pos = rgd;
1928 if (rgd != begin) /* If we didn't wrap */
1929 return true;
1930 return false;
1931 }
1932
1933 /**
1934 * fast_to_acquire - determine if a resource group will be fast to acquire
1935 * @rgd: The rgrp to test
 *
1936 * If this is one of our preferred rgrps, it should be quicker to acquire,
1937 * because we tried to set ourselves up as dlm lock master.
1938 */
1939 static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
1940 {
1941 struct gfs2_glock *gl = rgd->rd_gl;
1942
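	/*
	 * Likely cheap: the glock is already cached locally, nobody is
	 * queued on it, and no demote is in progress.
	 */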
1943 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
1944 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
1945 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1946 return 1;
1947 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
1948 return 1;
1949 return 0;
1950 }
1951
1952 /**
1953 * gfs2_inplace_reserve - Reserve space in the filesystem
1954 * @ip: the inode to reserve space for
1955 * @ap: the allocation parameters
1956 *
1957 * We try our best to find an rgrp that has at least ap->target blocks
1958 * available. After a couple of passes (loops == 2), the prospects of finding
1959 * such an rgrp diminish. At this stage, we return the first rgrp that has
1960 * at least ap->min_target blocks available. Either way, we set ap->allowed to
1961 * the number of blocks available in the chosen rgrp.
1962 *
1963 * Returns: 0 on success,
1964 * -ENOSPC if a suitable rgrp can't be found
1965 * errno otherwise
1966 */
1967
1968 int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
1969 {
1970 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1971 struct gfs2_rgrpd *begin = NULL;
1972 struct gfs2_blkreserv *rs = &ip->i_res;
1973 int error = 0, rg_locked, flags = 0;
1974 u64 last_unlinked = NO_BLOCK;
1975 int loops = 0;
1976 u32 skip = 0;
1977
1978 if (sdp->sd_args.ar_rgrplvb)
1979 flags |= GL_SKIP;
1980 if (gfs2_assert_warn(sdp, ap->target))
1981 return -EINVAL;
1982 if (gfs2_rs_active(rs)) {
1983 begin = rs->rs_rbm.rgd;
1984 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
1985 rs->rs_rbm.rgd = begin = ip->i_rgd;
1986 } else {
1987 check_and_update_goal(ip);
1988 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
1989 }
1990 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
1991 skip = gfs2_orlov_skip(ip);
1992 if (rs->rs_rbm.rgd == NULL)
1993 return -EBADSLT;
1994
1995 while (loops < 3) {
1996 rg_locked = 1;
1997
1998 if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
1999 rg_locked = 0;
2000 if (skip && skip--)
2001 goto next_rgrp;
2002 if (!gfs2_rs_active(rs)) {
2003 if (loops == 0 &&
2004 !fast_to_acquire(rs->rs_rbm.rgd))
2005 goto next_rgrp;
2006 if ((loops < 2) &&
2007 gfs2_rgrp_used_recently(rs, 1000) &&
2008 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2009 goto next_rgrp;
2010 }
2011 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
2012 LM_ST_EXCLUSIVE, flags,
2013 &rs->rs_rgd_gh);
2014 if (unlikely(error))
2015 return error;
2016 if (!gfs2_rs_active(rs) && (loops < 2) &&
2017 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2018 goto skip_rgrp;
2019 if (sdp->sd_args.ar_rgrplvb) {
2020 error = update_rgrp_lvb(rs->rs_rbm.rgd);
2021 if (unlikely(error)) {
2022 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2023 return error;
2024 }
2025 }
2026 }
2027
2028 /* Skip unusable resource groups */
2029 if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2030 GFS2_RDF_ERROR)) ||
2031 (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
2032 goto skip_rgrp;
2033
2034 if (sdp->sd_args.ar_rgrplvb)
2035 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2036
2037 /* Get a reservation if we don't already have one */
2038 if (!gfs2_rs_active(rs))
2039 rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
2040
2041 /* Skip rgrps when we can't get a reservation on first pass */
2042 if (!gfs2_rs_active(rs) && (loops < 1))
2043 goto check_rgrp;
2044
2045 /* If rgrp has enough free space, use it */
2046 if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
2047 (loops == 2 && ap->min_target &&
2048 rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
2049 ip->i_rgd = rs->rs_rbm.rgd;
2050 ap->allowed = ip->i_rgd->rd_free_clone;
2051 return 0;
2052 }
2053 check_rgrp:
2054 /* Check for unlinked inodes which can be reclaimed */
2055 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2056 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2057 ip->i_no_addr);
2058 skip_rgrp:
2059 /* Drop reservation, if we couldn't use reserved rgrp */
2060 if (gfs2_rs_active(rs))
2061 gfs2_rs_deltree(rs);
2062
2063 /* Unlock rgrp if required */
2064 if (!rg_locked)
2065 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2066 next_rgrp:
2067 /* Find the next rgrp, and continue looking */
2068 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2069 continue;
2070 if (skip)
2071 continue;
2072
2073 /* If we've scanned all the rgrps, but found no free blocks
2074 * then this checks for some less likely conditions before
2075 * trying again.
2076 */
2077 loops++;
2078 /* Check that fs hasn't grown if writing to rindex */
2079 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2080 error = gfs2_ri_update(ip);
2081 if (error)
2082 return error;
2083 }
2084 /* Flushing the log may release space */
2085 if (loops == 2)
2086 gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
2087 }
2088
2089 return -ENOSPC;
2090 }
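
/*
 * Hedged usage sketch (compiled out): how a write path might pair the
 * reservation with an allocation and its release.  The function
 * example_reserve_and_alloc() is hypothetical and quota/transaction
 * setup is omitted; see the real callers (e.g. aops.c, inode.c).
 */
#if 0
static int example_reserve_and_alloc(struct gfs2_inode *ip, u64 *bn,
				     unsigned int nblocks)
{
	struct gfs2_alloc_parms ap = { .target = nblocks, };
	int error;

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		return error;
	error = gfs2_alloc_blocks(ip, bn, &nblocks, false, NULL);
	gfs2_inplace_release(ip);
	return error;
}
#endif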
2091
2092 /**
2093 * gfs2_inplace_release - release an inplace reservation
2094 * @ip: the inode the reservation was taken out on
2095 *
2096 * Release a reservation made by gfs2_inplace_reserve().
2097 */
2098
2099 void gfs2_inplace_release(struct gfs2_inode *ip)
2100 {
2101 struct gfs2_blkreserv *rs = &ip->i_res;
2102
2103 if (rs->rs_rgd_gh.gh_gl)
2104 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2105 }
2106
2107 /**
2108 * gfs2_get_block_type - Check if a block in a RG is of the given type
2109 * @rgd: the resource group holding the block
2110 * @block: the block number
2111 *
2112 * Returns: The block type (GFS2_BLKST_*)
2113 */
2114
2115 static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
2116 {
2117 struct gfs2_rbm rbm = { .rgd = rgd, };
2118 int ret;
2119
2120 ret = gfs2_rbm_from_block(&rbm, block);
2121 WARN_ON_ONCE(ret != 0);
2122
2123 return gfs2_testbit(&rbm);
2124 }
2125
2126
2127 /**
2128 * gfs2_alloc_extent - allocate an extent from a given bitmap
2129 * @rbm: the position at which to start the allocation
2130 * @dinode: TRUE if the first block we allocate is for a dinode
2131 * @n: The requested extent length (value/result)
2132 *
2133 * Adds the bitmap buffer(s) to the transaction and sets the allocated bits
2134 * to GFS2_BLKST_DINODE or GFS2_BLKST_USED; @n returns the length allocated.
2135 */
2136 static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2137 unsigned int *n)
2138 {
2139 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2140 const unsigned int elen = *n;
2141 u64 block;
2142 int ret;
2143
2144 *n = 1;
2145 block = gfs2_rbm_to_block(rbm);
2146 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2147 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2148 block++;
2149 while (*n < elen) {
2150 ret = gfs2_rbm_from_block(&pos, block);
2151 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
2152 break;
2153 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2154 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2155 (*n)++;
2156 block++;
2157 }
2158 }
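
/*
 * Hedged example (compiled out) of the value/result convention: ask
 * for up to 16 blocks; afterwards n holds how many contiguous blocks
 * starting at rbm were actually marked allocated (at least 1).
 */
#if 0
	unsigned int n = 16;

	gfs2_alloc_extent(&rbm, false, &n);	/* 1 <= n <= 16 on return */
#endif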
2159
2160 /**
2161 * rgblk_free - Change alloc state of given block(s)
2162 * @sdp: the filesystem
2163 * @bstart: the start of a run of blocks to free
2164 * @blen: the length of the block run (all must lie within ONE RG!)
2165 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2166 *
2167 * Returns: Resource group containing the block(s)
2168 */
2169
2170 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2171 u32 blen, unsigned char new_state)
2172 {
2173 struct gfs2_rbm rbm;
2174 struct gfs2_bitmap *bi, *bi_prev = NULL;
2175
2176 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2177 if (!rbm.rgd) {
2178 if (gfs2_consist(sdp))
2179 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
2180 return NULL;
2181 }
2182
2183 gfs2_rbm_from_block(&rbm, bstart);
2184 while (blen--) {
2185 bi = rbm_bi(&rbm);
2186 if (bi != bi_prev) {
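			/*
			 * First change to this bitmap block in the current
			 * transaction: keep an unmodified copy in bi_clone.
			 * Allocation searches use the clone, so blocks freed
			 * here are not handed out again before the log has
			 * been committed.
			 */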
2187 if (!bi->bi_clone) {
2188 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2189 GFP_NOFS | __GFP_NOFAIL);
2190 memcpy(bi->bi_clone + bi->bi_offset,
2191 bi->bi_bh->b_data + bi->bi_offset,
2192 bi->bi_len);
2193 }
2194 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2195 bi_prev = bi;
2196 }
2197 gfs2_setbit(&rbm, false, new_state);
2198 gfs2_rbm_incr(&rbm);
2199 }
2200
2201 return rbm.rgd;
2202 }
2203
2204 /**
2205 * gfs2_rgrp_dump - print out an rgrp
2206 * @seq: The iterator
2207 * @gl: The glock in question
2208 *
2209 */
2210
2211 void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
2212 {
2213 struct gfs2_rgrpd *rgd = gl->gl_object;
2214 struct gfs2_blkreserv *trs;
2215 const struct rb_node *n;
2216
2217 if (rgd == NULL)
2218 return;
2219 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
2220 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2221 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2222 rgd->rd_reserved, rgd->rd_extfail_pt);
2223 spin_lock(&rgd->rd_rsspin);
2224 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2225 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2226 dump_rs(seq, trs);
2227 }
2228 spin_unlock(&rgd->rd_rsspin);
2229 }
2230
2231 static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2232 {
2233 struct gfs2_sbd *sdp = rgd->rd_sbd;
2234 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2235 (unsigned long long)rgd->rd_addr);
2236 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2237 gfs2_rgrp_dump(NULL, rgd->rd_gl);
2238 rgd->rd_flags |= GFS2_RDF_ERROR;
2239 }
2240
2241 /**
2242 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2243 * @ip: The inode we have just allocated blocks for
2244 * @rbm: The start of the allocated blocks
2245 * @len: The extent length
2246 *
2247 * Adjusts a reservation after an allocation has taken place. If the
2248 * reservation does not match the allocation, or if it is now empty
2249 * then it is removed.
2250 */
2251
2252 static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2253 const struct gfs2_rbm *rbm, unsigned len)
2254 {
2255 struct gfs2_blkreserv *rs = &ip->i_res;
2256 struct gfs2_rgrpd *rgd = rbm->rgd;
2257 unsigned rlen;
2258 u64 block;
2259 int ret;
2260
2261 spin_lock(&rgd->rd_rsspin);
2262 if (gfs2_rs_active(rs)) {
2263 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
2264 block = gfs2_rbm_to_block(rbm);
2265 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
2266 rlen = min(rs->rs_free, len);
2267 rs->rs_free -= rlen;
2268 rgd->rd_reserved -= rlen;
2269 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2270 if (rs->rs_free && !ret)
2271 goto out;
2272 /* We used up our block reservation, so we should
2273 reserve more blocks next time. */
2274 atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
2275 }
2276 __rs_deltree(rs);
2277 }
2278 out:
2279 spin_unlock(&rgd->rd_rsspin);
2280 }
2281
2282 /**
2283 * gfs2_set_alloc_start - Set starting point for block allocation
2284 * @rbm: The rbm which will be set to the required location
2285 * @ip: The gfs2 inode
2286 * @dinode: Flag to say if allocation includes a new inode
2287 *
2288 * This sets the starting point from the reservation if one is active
2289 * otherwise it falls back to guessing a start point based on the
2290 * inode's goal block or the last allocation point in the rgrp.
2291 */
2292
2293 static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2294 const struct gfs2_inode *ip, bool dinode)
2295 {
2296 u64 goal;
2297
2298 if (gfs2_rs_active(&ip->i_res)) {
2299 *rbm = ip->i_res.rs_rbm;
2300 return;
2301 }
2302
2303 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2304 goal = ip->i_goal;
2305 else
2306 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2307
2308 gfs2_rbm_from_block(rbm, goal);
2309 }
2310
2311 /**
2312 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2313 * @ip: the inode to allocate the block for
2314 * @bn: Used to return the starting block number
2315 * @nblocks: requested number of blocks/extent length (value/result)
2316 * @dinode: 1 if we're allocating a dinode block, else 0
2317 * @generation: the generation number of the inode
2318 *
2319 * Returns: 0 or error
2320 */
2321
2322 int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2323 bool dinode, u64 *generation)
2324 {
2325 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2326 struct buffer_head *dibh;
2327 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
2328 unsigned int ndata;
2329 u64 block; /* block, within the file system scope */
2330 int error;
2331
2332 gfs2_set_alloc_start(&rbm, ip, dinode);
2333 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false, NULL);
2334
2335 if (error == -ENOSPC) {
2336 gfs2_set_alloc_start(&rbm, ip, dinode);
2337 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false,
2338 NULL);
2339 }
2340
2341 /* Since all blocks are reserved in advance, this shouldn't happen */
2342 if (error) {
2343 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
2344 (unsigned long long)ip->i_no_addr, error, *nblocks,
2345 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
2346 rbm.rgd->rd_extfail_pt);
2347 goto rgrp_error;
2348 }
2349
2350 gfs2_alloc_extent(&rbm, dinode, nblocks);
2351 block = gfs2_rbm_to_block(&rbm);
2352 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
2353 if (gfs2_rs_active(&ip->i_res))
2354 gfs2_adjust_reservation(ip, &rbm, *nblocks);
2355 ndata = *nblocks;
2356 if (dinode)
2357 ndata--;
2358
2359 if (!dinode) {
2360 ip->i_goal = block + ndata - 1;
2361 error = gfs2_meta_inode_buffer(ip, &dibh);
2362 if (error == 0) {
2363 struct gfs2_dinode *di =
2364 (struct gfs2_dinode *)dibh->b_data;
2365 gfs2_trans_add_meta(ip->i_gl, dibh);
2366 di->di_goal_meta = di->di_goal_data =
2367 cpu_to_be64(ip->i_goal);
2368 brelse(dibh);
2369 }
2370 }
2371 if (rbm.rgd->rd_free < *nblocks) {
2372 pr_warn("nblocks=%u\n", *nblocks);
2373 goto rgrp_error;
2374 }
2375
2376 rbm.rgd->rd_free -= *nblocks;
2377 if (dinode) {
2378 rbm.rgd->rd_dinodes++;
2379 *generation = rbm.rgd->rd_igeneration++;
2380 if (*generation == 0)
2381 *generation = rbm.rgd->rd_igeneration++;
2382 }
2383
2384 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2385 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2386 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
2387
2388 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
2389 if (dinode)
2390 gfs2_trans_add_unrevoke(sdp, block, *nblocks);
2391
2392 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
2393
2394 rbm.rgd->rd_free_clone -= *nblocks;
2395 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
2396 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2397 *bn = block;
2398 return 0;
2399
2400 rgrp_error:
2401 gfs2_rgrp_error(rbm.rgd);
2402 return -EIO;
2403 }
2404
2405 /**
2406 * __gfs2_free_blocks - free a contiguous run of block(s)
2407 * @ip: the inode these blocks are being freed from
2408 * @bstart: first block of a run of contiguous blocks
2409 * @blen: the length of the block run
2410 * @meta: 1 if the blocks represent metadata
2411 *
2412 */
2413
2414 void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
2415 {
2416 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2417 struct gfs2_rgrpd *rgd;
2418
2419 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2420 if (!rgd)
2421 return;
2422 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2423 rgd->rd_free += blen;
2424 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2425 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2426 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2427 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2428
2429 /* Directories keep their data in the metadata address space */
2430 if (meta || ip->i_depth)
2431 gfs2_meta_wipe(ip, bstart, blen);
2432 }
2433
2434 /**
2435 * gfs2_free_meta - free a contiguous run of metadata block(s)
2436 * @ip: the inode these blocks are being freed from
2437 * @bstart: first block of a run of contiguous blocks
2438 * @blen: the length of the block run
2439 *
2440 */
2441
2442 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
2443 {
2444 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2445
2446 __gfs2_free_blocks(ip, bstart, blen, 1);
2447 gfs2_statfs_change(sdp, 0, +blen, 0);
2448 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
2449 }
2450
2451 void gfs2_unlink_di(struct inode *inode)
2452 {
2453 struct gfs2_inode *ip = GFS2_I(inode);
2454 struct gfs2_sbd *sdp = GFS2_SB(inode);
2455 struct gfs2_rgrpd *rgd;
2456 u64 blkno = ip->i_no_addr;
2457
2458 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2459 if (!rgd)
2460 return;
2461 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2462 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2463 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2464 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2465 update_rgrp_lvb_unlinked(rgd, 1);
2466 }
2467
2468 static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
2469 {
2470 struct gfs2_sbd *sdp = rgd->rd_sbd;
2471 struct gfs2_rgrpd *tmp_rgd;
2472
2473 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2474 if (!tmp_rgd)
2475 return;
2476 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2477
2478 if (!rgd->rd_dinodes)
2479 gfs2_consist_rgrpd(rgd);
2480 rgd->rd_dinodes--;
2481 rgd->rd_free++;
2482
2483 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2484 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2485 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2486 update_rgrp_lvb_unlinked(rgd, -1);
2487
2488 gfs2_statfs_change(sdp, 0, +1, -1);
2489 }
2490
2491
2492 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2493 {
2494 gfs2_free_uninit_di(rgd, ip->i_no_addr);
2495 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2496 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
2497 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
2498 }
2499
2500 /**
2501 * gfs2_check_blk_type - Check the type of a block
2502 * @sdp: The superblock
2503 * @no_addr: The block number to check
2504 * @type: The block type we are looking for
2505 *
2506 * Returns: 0 if the block type matches the expected type
2507 * -ESTALE if it doesn't match
2508 * or -ve errno if something went wrong while checking
2509 */
2510
2511 int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2512 {
2513 struct gfs2_rgrpd *rgd;
2514 struct gfs2_holder rgd_gh;
2515 int error = -EINVAL;
2516
2517 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
2518 if (!rgd)
2519 goto fail;
2520
2521 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2522 if (error)
2523 goto fail;
2524
2525 if (gfs2_get_block_type(rgd, no_addr) != type)
2526 error = -ESTALE;
2527
2528 gfs2_glock_dq_uninit(&rgd_gh);
2529 fail:
2530 return error;
2531 }
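
/*
 * Hedged usage sketch (compiled out): callers verify a block's state
 * before acting on it, e.g. checking that a suspected unlinked dinode
 * is still marked GFS2_BLKST_UNLINKED before reclaiming it.  The
 * wrapper example_still_unlinked() is hypothetical.
 */
#if 0
static int example_still_unlinked(struct gfs2_sbd *sdp, u64 no_addr)
{
	return gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_UNLINKED);
}
#endif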
2532
2533 /**
2534 * gfs2_rlist_add - add a RG to a list of RGs
2535 * @ip: the inode
2536 * @rlist: the list of resource groups
2537 * @block: the block
2538 *
2539 * Figure out what RG a block belongs to and add that RG to the list
2540 *
2541 * FIXME: Don't use NOFAIL
2542 *
2543 */
2544
2545 void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
2546 u64 block)
2547 {
2548 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2549 struct gfs2_rgrpd *rgd;
2550 struct gfs2_rgrpd **tmp;
2551 unsigned int new_space;
2552 unsigned int x;
2553
2554 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2555 return;
2556
2557 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2558 rgd = ip->i_rgd;
2559 else
2560 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2561 if (!rgd) {
2562 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
2563 return;
2564 }
2565 ip->i_rgd = rgd;
2566
2567 for (x = 0; x < rlist->rl_rgrps; x++)
2568 if (rlist->rl_rgd[x] == rgd)
2569 return;
2570
2571 if (rlist->rl_rgrps == rlist->rl_space) {
2572 new_space = rlist->rl_space + 10;
2573
2574 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
2575 GFP_NOFS | __GFP_NOFAIL);
2576
2577 if (rlist->rl_rgd) {
2578 memcpy(tmp, rlist->rl_rgd,
2579 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2580 kfree(rlist->rl_rgd);
2581 }
2582
2583 rlist->rl_space = new_space;
2584 rlist->rl_rgd = tmp;
2585 }
2586
2587 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2588 }
2589
2590 /**
2591 * gfs2_rlist_alloc - allocate and initialize glock holders for the rlist rgrps
2593 * @rlist: the list of resource groups
2594 * @state: the lock state to acquire the RG lock in
2595 *
2596 * FIXME: Don't use NOFAIL
2597 *
2598 */
2599
2600 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2601 {
2602 unsigned int x;
2603
2604 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
2605 GFP_NOFS | __GFP_NOFAIL);
2606 for (x = 0; x < rlist->rl_rgrps; x++)
2607 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
2608 state, 0,
2609 &rlist->rl_ghs[x]);
2610 }
2611
2612 /**
2613 * gfs2_rlist_free - free a resource group list
2614 * @rlist: the list of resource groups
2615 *
2616 */
2617
2618 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2619 {
2620 unsigned int x;
2621
2622 kfree(rlist->rl_rgd);
2623
2624 if (rlist->rl_ghs) {
2625 for (x = 0; x < rlist->rl_rgrps; x++)
2626 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2627 kfree(rlist->rl_ghs);
2628 rlist->rl_ghs = NULL;
2629 }
2630 }
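
/*
 * Hedged lifecycle sketch (compiled out) for the rlist helpers above,
 * modelled on the deallocation paths (e.g. leaf_dealloc() in dir.c):
 * collect the rgrps, lock them all, do the work, unlock, then free.
 * example_rlist_cycle() is hypothetical and the freeing work is elided.
 */
#if 0
static int example_rlist_cycle(struct gfs2_inode *ip, const u64 *blocks,
			       unsigned int nblocks)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	for (x = 0; x < nblocks; x++)
		gfs2_rlist_add(ip, &rlist, blocks[x]);
	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (!error) {
		/* ... change block states under the rgrp locks ... */
		gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
	}
	gfs2_rlist_free(&rlist);
	return error;
}
#endif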
2631