/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

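/*
 * Illustrative example (not part of the original source): assuming a
 * 4KiB block size (EXT4_BLOCK_SIZE_BITS == 12), no bigalloc (cluster
 * bits 0) and s_first_data_block == 0, each group spans 4096 * 8 =
 * 32768 blocks, so the STD_GROUP_SIZE fast path above maps block
 * 100000 to group 100000 >> 15 == 3.
 */
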
/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	/* do_div() leaves the group number in blocknr and returns the
	 * block offset within the group, which the shift converts to
	 * a cluster offset */
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

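/*
 * Number of clusters in the given block group; every group spans
 * EXT4_BLOCKS_PER_GROUP(sb) blocks except possibly the last one,
 * which may be truncated by the end of the file system.
 */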
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
				  struct buffer_head *bh,
				  ext4_group_t block_group,
				  struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;
	struct ext4_group_info *grp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks used to prevent allocation,
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		grp = ext4_get_group_info(sb, block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;
			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EIO;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	return 0;
}

/* Return the number of free clusters in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG, the inode/block bitmaps and the inode
		 * table blocks may not be in the group at all, so the
		 * bitmap validation is skipped for those groups; doing
		 * it properly would require also reading the block
		 * group where the bitmaps are located to verify that
		 * the corresponding bits are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether the block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block numbers are set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

static void ext4_validate_block_bitmap(struct super_block *sb,
				       struct ext4_group_desc *desc,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	ext4_fsblk_t blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode-table blocks are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		int err;

		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		if (err)
			ext4_error(sb, "Checksum bad for grp %u", block_group);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not marked uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (buffer_verified(bh))
		return bh;
	put_bh(bh);
	return NULL;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	/* ...but check for error just in case errors=continue. */
	return !buffer_verified(bh);
}

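/*
 * Read and validate the block bitmap for @block_group: issue the read
 * via ext4_read_block_bitmap_nowait() and then wait for it to complete.
 * Returns the buffer_head on success, or NULL on any failure.
 */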
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster
	 * ratio, so we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

	/*
	 * The fast percpu-counter reads above are only approximate;
	 * if we are close enough to the watermark that the answer
	 * could be wrong, fall back to the exact (slower) sums.
	 */
	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks.  Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

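/*
 * Try to reserve @nclusters clusters on behalf of a pending allocation:
 * if they are available, account for them in the dirty-clusters counter
 * and return 0; otherwise return -ENOSPC.
 */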
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:		super block
 * @retries:	number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @flags:	allocation flags passed on to ext4_mb_new_blocks()
 * @count:	pointer to total number of clusters needed
 * @errp:	error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and the error code is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

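/*
 * Helper for ext4_bg_has_super(): returns 1 iff @a is a positive
 * integer power of @b (a == b, b*b, b*b*b, ...), 0 otherwise.
 */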
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.  With sparse_super,
 * backups live in group 1 and in groups that are powers of 3, 5 and 7
 * (e.g. 1, 3, 5, 7, 9, 25, 27, 49, ...).
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}

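/*
 * With META_BG, each metagroup of EXT4_DESC_PER_BLOCK(sb) block groups
 * stores its single descriptor block in the first group of the
 * metagroup, with backups in the second and the last group.
 */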
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

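/*
 * Number of descriptor blocks for a group laid out in the old (non
 * META_BG) style: a group with a superblock backup also carries a copy
 * of the old-style descriptor table (all of it when META_BG is absent,
 * the first s_first_meta_bg blocks of it otherwise).
 */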
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode:	inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}