1 /*
2 * linux/fs/ext2/super.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/fs/minix/inode.c
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 *
15 * Big-endian to little-endian byte-swapping/bitmaps by
16 * David S. Miller (davem@caip.rutgers.edu), 1995
17 */
18
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/fs.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/blkdev.h>
25 #include <linux/parser.h>
26 #include <linux/random.h>
27 #include <linux/buffer_head.h>
28 #include <linux/exportfs.h>
29 #include <linux/vfs.h>
30 #include <linux/seq_file.h>
31 #include <linux/mount.h>
32 #include <linux/log2.h>
33 #include <linux/quotaops.h>
34 #include <asm/uaccess.h>
35 #include "ext2.h"
36 #include "xattr.h"
37 #include "acl.h"
38 #include "xip.h"
39
40 static void ext2_sync_super(struct super_block *sb,
41 struct ext2_super_block *es, int wait);
42 static int ext2_remount (struct super_block * sb, int * flags, char * data);
43 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
44 static int ext2_sync_fs(struct super_block *sb, int wait);
45
46 void ext2_error (struct super_block * sb, const char * function,
47 const char * fmt, ...)
48 {
49 va_list args;
50 struct ext2_sb_info *sbi = EXT2_SB(sb);
51 struct ext2_super_block *es = sbi->s_es;
52
53 if (!(sb->s_flags & MS_RDONLY)) {
54 spin_lock(&sbi->s_lock);
55 sbi->s_mount_state |= EXT2_ERROR_FS;
56 es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
57 spin_unlock(&sbi->s_lock);
58 ext2_sync_super(sb, es, 1);
59 }
60
61 va_start(args, fmt);
62 printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function);
63 vprintk(fmt, args);
64 printk("\n");
65 va_end(args);
66
67 if (test_opt(sb, ERRORS_PANIC))
68 panic("EXT2-fs: panic from previous error\n");
69 if (test_opt(sb, ERRORS_RO)) {
70 ext2_msg(sb, KERN_CRIT,
71 "error: remounting filesystem read-only");
72 sb->s_flags |= MS_RDONLY;
73 }
74 }
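/*
 * Typical usage (as in ext2_check_descriptors() below): callers pass the
 * reporting function's name plus a printf-style message, e.g.
 *
 *	ext2_error(sb, "ext2_check_descriptors",
 *		   "Block bitmap for group %d not in group (block %lu)!",
 *		   i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
 *
 * Depending on the errors= mount option this either just logs the message,
 * panics, or remounts the filesystem read-only.
 */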
75
76 void ext2_msg(struct super_block *sb, const char *prefix,
77 const char *fmt, ...)
78 {
79 va_list args;
80
81 va_start(args, fmt);
82 printk("%sEXT2-fs (%s): ", prefix, sb->s_id);
83 vprintk(fmt, args);
84 printk("\n");
85 va_end(args);
86 }
87
88 /*
89 * This must be called with sbi->s_lock held.
90 */
91 void ext2_update_dynamic_rev(struct super_block *sb)
92 {
93 struct ext2_super_block *es = EXT2_SB(sb)->s_es;
94
95 if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
96 return;
97
98 ext2_msg(sb, KERN_WARNING,
99 "warning: updating to rev %d because of "
100 "new feature flag, running e2fsck is recommended",
101 EXT2_DYNAMIC_REV);
102
103 es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
104 es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE);
105 es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV);
106 /* leave es->s_feature_*compat flags alone */
107 /* es->s_uuid will be set by e2fsck if empty */
108
109 /*
110 * The rest of the superblock fields should be zero, and if not it
111 * means they are likely already in use, so leave them alone. We
112 * can leave it up to e2fsck to clean up any inconsistencies there.
113 */
114 }
115
116 static void ext2_put_super (struct super_block * sb)
117 {
118 int db_count;
119 int i;
120 struct ext2_sb_info *sbi = EXT2_SB(sb);
121
122 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
123
124 if (sb->s_dirt)
125 ext2_write_super(sb);
126
127 ext2_xattr_put_super(sb);
128 if (!(sb->s_flags & MS_RDONLY)) {
129 struct ext2_super_block *es = sbi->s_es;
130
131 spin_lock(&sbi->s_lock);
132 es->s_state = cpu_to_le16(sbi->s_mount_state);
133 spin_unlock(&sbi->s_lock);
134 ext2_sync_super(sb, es, 1);
135 }
136 db_count = sbi->s_gdb_count;
137 for (i = 0; i < db_count; i++)
138 if (sbi->s_group_desc[i])
139 brelse (sbi->s_group_desc[i]);
140 kfree(sbi->s_group_desc);
141 kfree(sbi->s_debts);
142 percpu_counter_destroy(&sbi->s_freeblocks_counter);
143 percpu_counter_destroy(&sbi->s_freeinodes_counter);
144 percpu_counter_destroy(&sbi->s_dirs_counter);
145 brelse (sbi->s_sbh);
146 sb->s_fs_info = NULL;
147 kfree(sbi->s_blockgroup_lock);
148 kfree(sbi);
149 }
150
151 static struct kmem_cache * ext2_inode_cachep;
152
153 static struct inode *ext2_alloc_inode(struct super_block *sb)
154 {
155 struct ext2_inode_info *ei;
156 	ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
157 if (!ei)
158 return NULL;
159 ei->i_block_alloc_info = NULL;
160 ei->vfs_inode.i_version = 1;
161 return &ei->vfs_inode;
162 }
163
164 static void ext2_destroy_inode(struct inode *inode)
165 {
166 kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
167 }
168
169 static void init_once(void *foo)
170 {
171 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
172
173 rwlock_init(&ei->i_meta_lock);
174 #ifdef CONFIG_EXT2_FS_XATTR
175 init_rwsem(&ei->xattr_sem);
176 #endif
177 mutex_init(&ei->truncate_mutex);
178 inode_init_once(&ei->vfs_inode);
179 }
180
181 static int init_inodecache(void)
182 {
183 ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
184 sizeof(struct ext2_inode_info),
185 0, (SLAB_RECLAIM_ACCOUNT|
186 SLAB_MEM_SPREAD),
187 init_once);
188 if (ext2_inode_cachep == NULL)
189 return -ENOMEM;
190 return 0;
191 }
192
193 static void destroy_inodecache(void)
194 {
195 kmem_cache_destroy(ext2_inode_cachep);
196 }
197
198 static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
199 {
200 struct super_block *sb = vfs->mnt_sb;
201 struct ext2_sb_info *sbi = EXT2_SB(sb);
202 struct ext2_super_block *es = sbi->s_es;
203 unsigned long def_mount_opts;
204
205 spin_lock(&sbi->s_lock);
206 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
207
208 if (sbi->s_sb_block != 1)
209 seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
210 if (test_opt(sb, MINIX_DF))
211 seq_puts(seq, ",minixdf");
212 if (test_opt(sb, GRPID))
213 seq_puts(seq, ",grpid");
214 if (!test_opt(sb, GRPID) && (def_mount_opts & EXT2_DEFM_BSDGROUPS))
215 seq_puts(seq, ",nogrpid");
216 if (sbi->s_resuid != EXT2_DEF_RESUID ||
217 le16_to_cpu(es->s_def_resuid) != EXT2_DEF_RESUID) {
218 seq_printf(seq, ",resuid=%u", sbi->s_resuid);
219 }
220 if (sbi->s_resgid != EXT2_DEF_RESGID ||
221 le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
222 seq_printf(seq, ",resgid=%u", sbi->s_resgid);
223 }
224 if (test_opt(sb, ERRORS_RO)) {
225 int def_errors = le16_to_cpu(es->s_errors);
226
227 if (def_errors == EXT2_ERRORS_PANIC ||
228 def_errors == EXT2_ERRORS_CONTINUE) {
229 seq_puts(seq, ",errors=remount-ro");
230 }
231 }
232 if (test_opt(sb, ERRORS_CONT))
233 seq_puts(seq, ",errors=continue");
234 if (test_opt(sb, ERRORS_PANIC))
235 seq_puts(seq, ",errors=panic");
236 if (test_opt(sb, NO_UID32))
237 seq_puts(seq, ",nouid32");
238 if (test_opt(sb, DEBUG))
239 seq_puts(seq, ",debug");
240 if (test_opt(sb, OLDALLOC))
241 seq_puts(seq, ",oldalloc");
242
243 #ifdef CONFIG_EXT2_FS_XATTR
244 if (test_opt(sb, XATTR_USER))
245 seq_puts(seq, ",user_xattr");
246 if (!test_opt(sb, XATTR_USER) &&
247 (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
248 seq_puts(seq, ",nouser_xattr");
249 }
250 #endif
251
252 #ifdef CONFIG_EXT2_FS_POSIX_ACL
253 if (test_opt(sb, POSIX_ACL))
254 seq_puts(seq, ",acl");
255 if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT2_DEFM_ACL))
256 seq_puts(seq, ",noacl");
257 #endif
258
259 if (test_opt(sb, NOBH))
260 seq_puts(seq, ",nobh");
261
262 #if defined(CONFIG_QUOTA)
263 if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
264 seq_puts(seq, ",usrquota");
265
266 if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
267 seq_puts(seq, ",grpquota");
268 #endif
269
270 #if defined(CONFIG_EXT2_FS_XIP)
271 if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
272 seq_puts(seq, ",xip");
273 #endif
274
275 if (!test_opt(sb, RESERVATION))
276 seq_puts(seq, ",noreservation");
277
278 spin_unlock(&sbi->s_lock);
279 return 0;
280 }
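/*
 * For illustration only: a filesystem mounted with, say,
 * "-o grpid,noreservation" would show up in /proc/mounts roughly as
 *
 *	/dev/sdb1 /mnt ext2 rw,grpid,noreservation 0 0
 *
 * (the exact set of options printed depends on the defaults recorded in
 * the on-disk superblock, as the checks above show).
 */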
281
282 #ifdef CONFIG_QUOTA
283 static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
284 static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
285 #endif
286
287 static const struct super_operations ext2_sops = {
288 .alloc_inode = ext2_alloc_inode,
289 .destroy_inode = ext2_destroy_inode,
290 .write_inode = ext2_write_inode,
291 .evict_inode = ext2_evict_inode,
292 .put_super = ext2_put_super,
293 .write_super = ext2_write_super,
294 .sync_fs = ext2_sync_fs,
295 .statfs = ext2_statfs,
296 .remount_fs = ext2_remount,
297 .show_options = ext2_show_options,
298 #ifdef CONFIG_QUOTA
299 .quota_read = ext2_quota_read,
300 .quota_write = ext2_quota_write,
301 #endif
302 };
303
304 static struct inode *ext2_nfs_get_inode(struct super_block *sb,
305 u64 ino, u32 generation)
306 {
307 struct inode *inode;
308
309 if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO)
310 return ERR_PTR(-ESTALE);
311 if (ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
312 return ERR_PTR(-ESTALE);
313
314 /* iget isn't really right if the inode is currently unallocated!!
315 * ext2_read_inode currently does appropriate checks, but
316 * it might be "neater" to call ext2_get_inode first and check
317 * if the inode is valid.....
318 */
319 inode = ext2_iget(sb, ino);
320 if (IS_ERR(inode))
321 return ERR_CAST(inode);
322 if (generation && inode->i_generation != generation) {
323 /* we didn't find the right inode.. */
324 iput(inode);
325 return ERR_PTR(-ESTALE);
326 }
327 return inode;
328 }
329
330 static struct dentry *ext2_fh_to_dentry(struct super_block *sb, struct fid *fid,
331 int fh_len, int fh_type)
332 {
333 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
334 ext2_nfs_get_inode);
335 }
336
337 static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
338 int fh_len, int fh_type)
339 {
340 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
341 ext2_nfs_get_inode);
342 }
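/*
 * Both helpers above rely on generic_fh_to_dentry()/generic_fh_to_parent()
 * to decode an (inode number, generation) pair -- the object's or its
 * parent's, respectively -- from the NFS file handle and hand it to
 * ext2_nfs_get_inode(); the generation check there is what catches handles
 * that have gone stale because the inode was reused.
 */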
343
344 /* Yes, most of these are left as NULL!!
345 * A NULL value implies the default, which works with ext2-like file
346 * systems, but can be improved upon.
347 * Currently only get_parent is required.
348 */
349 static const struct export_operations ext2_export_ops = {
350 .fh_to_dentry = ext2_fh_to_dentry,
351 .fh_to_parent = ext2_fh_to_parent,
352 .get_parent = ext2_get_parent,
353 };
354
355 static unsigned long get_sb_block(void **data)
356 {
357 unsigned long sb_block;
358 char *options = (char *) *data;
359
360 if (!options || strncmp(options, "sb=", 3) != 0)
361 return 1; /* Default location */
362 options += 3;
363 sb_block = simple_strtoul(options, &options, 0);
364 if (*options && *options != ',') {
365 printk("EXT2-fs: Invalid sb specification: %s\n",
366 (char *) *data);
367 return 1;
368 }
369 if (*options == ',')
370 options++;
371 *data = (void *) options;
372 return sb_block;
373 }
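/*
 * Example: for "mount -o sb=8193,ro ..." the option string starts with
 * "sb=8193,ro", so this returns 8193 (a common backup superblock location
 * on 1KiB-block filesystems) and advances *data to point at "ro".
 */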
374
375 enum {
376 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
377 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
378 Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
379 Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
380 Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
381 Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
382 };
383
384 static const match_table_t tokens = {
385 {Opt_bsd_df, "bsddf"},
386 {Opt_minix_df, "minixdf"},
387 {Opt_grpid, "grpid"},
388 {Opt_grpid, "bsdgroups"},
389 {Opt_nogrpid, "nogrpid"},
390 {Opt_nogrpid, "sysvgroups"},
391 {Opt_resgid, "resgid=%u"},
392 {Opt_resuid, "resuid=%u"},
393 {Opt_sb, "sb=%u"},
394 {Opt_err_cont, "errors=continue"},
395 {Opt_err_panic, "errors=panic"},
396 {Opt_err_ro, "errors=remount-ro"},
397 {Opt_nouid32, "nouid32"},
398 {Opt_nocheck, "check=none"},
399 {Opt_nocheck, "nocheck"},
400 {Opt_debug, "debug"},
401 {Opt_oldalloc, "oldalloc"},
402 {Opt_orlov, "orlov"},
403 {Opt_nobh, "nobh"},
404 {Opt_user_xattr, "user_xattr"},
405 {Opt_nouser_xattr, "nouser_xattr"},
406 {Opt_acl, "acl"},
407 {Opt_noacl, "noacl"},
408 {Opt_xip, "xip"},
409 {Opt_grpquota, "grpquota"},
410 {Opt_ignore, "noquota"},
411 {Opt_quota, "quota"},
412 {Opt_usrquota, "usrquota"},
413 {Opt_reservation, "reservation"},
414 {Opt_noreservation, "noreservation"},
415 {Opt_err, NULL}
416 };
417
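/*
 * parse_options() walks a comma-separated option string, e.g. the one
 * produced by
 *
 *	mount -t ext2 -o resuid=1000,errors=remount-ro,noreservation /dev/sdb1 /mnt
 *
 * matching each token against the table above and updating
 * sbi->s_mount_opt; it returns 0 on an unknown option or bad argument.
 */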
418 static int parse_options(char *options, struct super_block *sb)
419 {
420 char *p;
421 struct ext2_sb_info *sbi = EXT2_SB(sb);
422 substring_t args[MAX_OPT_ARGS];
423 int option;
424
425 if (!options)
426 return 1;
427
428 while ((p = strsep (&options, ",")) != NULL) {
429 int token;
430 if (!*p)
431 continue;
432
433 token = match_token(p, tokens, args);
434 switch (token) {
435 case Opt_bsd_df:
436 clear_opt (sbi->s_mount_opt, MINIX_DF);
437 break;
438 case Opt_minix_df:
439 set_opt (sbi->s_mount_opt, MINIX_DF);
440 break;
441 case Opt_grpid:
442 set_opt (sbi->s_mount_opt, GRPID);
443 break;
444 case Opt_nogrpid:
445 clear_opt (sbi->s_mount_opt, GRPID);
446 break;
447 case Opt_resuid:
448 if (match_int(&args[0], &option))
449 return 0;
450 sbi->s_resuid = option;
451 break;
452 case Opt_resgid:
453 if (match_int(&args[0], &option))
454 return 0;
455 sbi->s_resgid = option;
456 break;
457 case Opt_sb:
458 /* handled by get_sb_block() instead of here */
459 /* *sb_block = match_int(&args[0]); */
460 break;
461 case Opt_err_panic:
462 clear_opt (sbi->s_mount_opt, ERRORS_CONT);
463 clear_opt (sbi->s_mount_opt, ERRORS_RO);
464 set_opt (sbi->s_mount_opt, ERRORS_PANIC);
465 break;
466 case Opt_err_ro:
467 clear_opt (sbi->s_mount_opt, ERRORS_CONT);
468 clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
469 set_opt (sbi->s_mount_opt, ERRORS_RO);
470 break;
471 case Opt_err_cont:
472 clear_opt (sbi->s_mount_opt, ERRORS_RO);
473 clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
474 set_opt (sbi->s_mount_opt, ERRORS_CONT);
475 break;
476 case Opt_nouid32:
477 set_opt (sbi->s_mount_opt, NO_UID32);
478 break;
479 case Opt_nocheck:
480 clear_opt (sbi->s_mount_opt, CHECK);
481 break;
482 case Opt_debug:
483 set_opt (sbi->s_mount_opt, DEBUG);
484 break;
485 case Opt_oldalloc:
486 set_opt (sbi->s_mount_opt, OLDALLOC);
487 break;
488 case Opt_orlov:
489 clear_opt (sbi->s_mount_opt, OLDALLOC);
490 break;
491 case Opt_nobh:
492 set_opt (sbi->s_mount_opt, NOBH);
493 break;
494 #ifdef CONFIG_EXT2_FS_XATTR
495 case Opt_user_xattr:
496 set_opt (sbi->s_mount_opt, XATTR_USER);
497 break;
498 case Opt_nouser_xattr:
499 clear_opt (sbi->s_mount_opt, XATTR_USER);
500 break;
501 #else
502 case Opt_user_xattr:
503 case Opt_nouser_xattr:
504 			ext2_msg(sb, KERN_INFO, "(no)user_xattr options "
505 				"not supported");
506 break;
507 #endif
508 #ifdef CONFIG_EXT2_FS_POSIX_ACL
509 case Opt_acl:
510 set_opt(sbi->s_mount_opt, POSIX_ACL);
511 break;
512 case Opt_noacl:
513 clear_opt(sbi->s_mount_opt, POSIX_ACL);
514 break;
515 #else
516 case Opt_acl:
517 case Opt_noacl:
518 ext2_msg(sb, KERN_INFO,
519 "(no)acl options not supported");
520 break;
521 #endif
522 case Opt_xip:
523 #ifdef CONFIG_EXT2_FS_XIP
524 set_opt (sbi->s_mount_opt, XIP);
525 #else
526 ext2_msg(sb, KERN_INFO, "xip option not supported");
527 #endif
528 break;
529
530 #if defined(CONFIG_QUOTA)
531 case Opt_quota:
532 case Opt_usrquota:
533 set_opt(sbi->s_mount_opt, USRQUOTA);
534 break;
535
536 case Opt_grpquota:
537 set_opt(sbi->s_mount_opt, GRPQUOTA);
538 break;
539 #else
540 case Opt_quota:
541 case Opt_usrquota:
542 case Opt_grpquota:
543 ext2_msg(sb, KERN_INFO,
544 "quota operations not supported");
545 break;
546 #endif
547
548 case Opt_reservation:
549 set_opt(sbi->s_mount_opt, RESERVATION);
550 ext2_msg(sb, KERN_INFO, "reservations ON");
551 break;
552 case Opt_noreservation:
553 clear_opt(sbi->s_mount_opt, RESERVATION);
554 ext2_msg(sb, KERN_INFO, "reservations OFF");
555 break;
556 case Opt_ignore:
557 break;
558 default:
559 return 0;
560 }
561 }
562 return 1;
563 }
564
565 static int ext2_setup_super (struct super_block * sb,
566 struct ext2_super_block * es,
567 int read_only)
568 {
569 int res = 0;
570 struct ext2_sb_info *sbi = EXT2_SB(sb);
571
572 if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
573 ext2_msg(sb, KERN_ERR,
574 "error: revision level too high, "
575 "forcing read-only mode");
576 res = MS_RDONLY;
577 }
578 if (read_only)
579 return res;
580 if (!(sbi->s_mount_state & EXT2_VALID_FS))
581 ext2_msg(sb, KERN_WARNING,
582 "warning: mounting unchecked fs, "
583 "running e2fsck is recommended");
584 else if ((sbi->s_mount_state & EXT2_ERROR_FS))
585 ext2_msg(sb, KERN_WARNING,
586 "warning: mounting fs with errors, "
587 "running e2fsck is recommended");
588 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
589 le16_to_cpu(es->s_mnt_count) >=
590 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
591 ext2_msg(sb, KERN_WARNING,
592 "warning: maximal mount count reached, "
593 "running e2fsck is recommended");
594 else if (le32_to_cpu(es->s_checkinterval) &&
595 (le32_to_cpu(es->s_lastcheck) +
596 le32_to_cpu(es->s_checkinterval) <= get_seconds()))
597 ext2_msg(sb, KERN_WARNING,
598 "warning: checktime reached, "
599 "running e2fsck is recommended");
600 if (!le16_to_cpu(es->s_max_mnt_count))
601 es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
602 le16_add_cpu(&es->s_mnt_count, 1);
603 if (test_opt (sb, DEBUG))
604 		ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
605 			"bpg=%lu, ipg=%lu, mo=%04lx",
606 EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
607 sbi->s_frag_size,
608 sbi->s_groups_count,
609 EXT2_BLOCKS_PER_GROUP(sb),
610 EXT2_INODES_PER_GROUP(sb),
611 sbi->s_mount_opt);
612 return res;
613 }
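/*
 * Note on the mount-count logic above: if the superblock carries no maximal
 * mount count it is seeded with EXT2_DFL_MAX_MNT_COUNT (historically 20),
 * and s_mnt_count is bumped on every read-write mount, so the "running
 * e2fsck is recommended" reminder fires once that many mounts have
 * accumulated.
 */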
614
615 static int ext2_check_descriptors(struct super_block *sb)
616 {
617 int i;
618 struct ext2_sb_info *sbi = EXT2_SB(sb);
619
620 ext2_debug ("Checking group descriptors");
621
622 for (i = 0; i < sbi->s_groups_count; i++) {
623 struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
624 ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
625 ext2_fsblk_t last_block;
626
627 if (i == sbi->s_groups_count - 1)
628 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
629 else
630 last_block = first_block +
631 (EXT2_BLOCKS_PER_GROUP(sb) - 1);
632
633 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
634 le32_to_cpu(gdp->bg_block_bitmap) > last_block)
635 {
636 ext2_error (sb, "ext2_check_descriptors",
637 "Block bitmap for group %d"
638 " not in group (block %lu)!",
639 i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
640 return 0;
641 }
642 if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
643 le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
644 {
645 ext2_error (sb, "ext2_check_descriptors",
646 "Inode bitmap for group %d"
647 " not in group (block %lu)!",
648 i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
649 return 0;
650 }
651 if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
652 le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
653 last_block)
654 {
655 ext2_error (sb, "ext2_check_descriptors",
656 "Inode table for group %d"
657 " not in group (block %lu)!",
658 i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
659 return 0;
660 }
661 }
662 return 1;
663 }
664
665 /*
666 * Maximal file size. There is a direct, and {,double-,triple-}indirect
667 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
668 * We need to be 1 filesystem block less than the 2^32 sector limit.
669 */
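/*
 * Rough worked example (the exact cap also depends on MAX_LFS_FILESIZE):
 * with 1KiB blocks (bits == 10) the indirect-tree limit dominates,
 *	12 + 2^8 + 2^16 + 2^24 blocks  ~= 16 GiB,
 * while with 4KiB blocks (bits == 12) the 2^32 sector limit in i_blocks
 * dominates and the result is roughly 2 TiB.
 */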
670 static loff_t ext2_max_size(int bits)
671 {
672 loff_t res = EXT2_NDIR_BLOCKS;
673 int meta_blocks;
674 loff_t upper_limit;
675
676 /* This is calculated to be the largest file size for a
677 	/* This is calculated to be the largest file size for a
678 	 * dense file, such that the total number of
679 	 * sectors in the file, including data and all indirect blocks,
680 	 * does not exceed 2^32 - 1, since the __u32 i_blocks field
681 	 * represents the total number of 512-byte blocks of the file.
682 */
683 upper_limit = (1LL << 32) - 1;
684
685 /* total blocks in file system block size */
686 upper_limit >>= (bits - 9);
687
688
689 /* indirect blocks */
690 meta_blocks = 1;
691 /* double indirect blocks */
692 meta_blocks += 1 + (1LL << (bits-2));
693 	/* triple indirect blocks */
694 meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
695
696 upper_limit -= meta_blocks;
697 upper_limit <<= bits;
698
699 res += 1LL << (bits-2);
700 res += 1LL << (2*(bits-2));
701 res += 1LL << (3*(bits-2));
702 res <<= bits;
703 if (res > upper_limit)
704 res = upper_limit;
705
706 if (res > MAX_LFS_FILESIZE)
707 res = MAX_LFS_FILESIZE;
708
709 return res;
710 }
711
712 static unsigned long descriptor_loc(struct super_block *sb,
713 unsigned long logic_sb_block,
714 int nr)
715 {
716 struct ext2_sb_info *sbi = EXT2_SB(sb);
717 unsigned long bg, first_meta_bg;
718 int has_super = 0;
719
720 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
721
722 if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
723 nr < first_meta_bg)
724 return (logic_sb_block + nr + 1);
725 bg = sbi->s_desc_per_block * nr;
726 if (ext2_bg_has_super(sb, bg))
727 has_super = 1;
728
729 return ext2_group_first_block_no(sb, bg) + has_super;
730 }
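/*
 * Example: without EXT2_FEATURE_INCOMPAT_META_BG the group descriptor
 * blocks simply follow the superblock, so with 1KiB blocks (superblock in
 * logical block 1) descriptor block nr lives in block 2 + nr.  With
 * META_BG, descriptor block nr instead lives in block group
 * s_desc_per_block * nr, right after that group's superblock backup if it
 * has one.
 */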
731
732 static int ext2_fill_super(struct super_block *sb, void *data, int silent)
733 {
734 struct buffer_head * bh;
735 struct ext2_sb_info * sbi;
736 struct ext2_super_block * es;
737 struct inode *root;
738 unsigned long block;
739 unsigned long sb_block = get_sb_block(&data);
740 unsigned long logic_sb_block;
741 unsigned long offset = 0;
742 unsigned long def_mount_opts;
743 long ret = -EINVAL;
744 int blocksize = BLOCK_SIZE;
745 int db_count;
746 int i, j;
747 __le32 features;
748 int err;
749
750 err = -ENOMEM;
751 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
752 if (!sbi)
753 goto failed_unlock;
754
755 sbi->s_blockgroup_lock =
756 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
757 if (!sbi->s_blockgroup_lock) {
758 kfree(sbi);
759 goto failed_unlock;
760 }
761 sb->s_fs_info = sbi;
762 sbi->s_sb_block = sb_block;
763
764 spin_lock_init(&sbi->s_lock);
765
766 /*
767 	 * See what the current blocksize for the device is, and
768 	 * use that as the blocksize, unless it is smaller than
769 	 * the default, in which case use the default.
770 * This is important for devices that have a hardware
771 * sectorsize that is larger than the default.
772 */
773 blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
774 if (!blocksize) {
775 ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
776 goto failed_sbi;
777 }
778
779 /*
780 * If the superblock doesn't start on a hardware sector boundary,
781 * calculate the offset.
782 */
783 if (blocksize != BLOCK_SIZE) {
784 logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
785 offset = (sb_block*BLOCK_SIZE) % blocksize;
786 } else {
787 logic_sb_block = sb_block;
788 }
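/*
 * Example: with the default sb_block of 1 and a device blocksize of 4096,
 * logic_sb_block becomes (1 * 1024) / 4096 = 0 and offset becomes 1024,
 * i.e. the superblock is read from byte offset 1024 inside device block 0.
 */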
789
790 if (!(bh = sb_bread(sb, logic_sb_block))) {
791 ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
792 goto failed_sbi;
793 }
794 /*
795 * Note: s_es must be initialized as soon as possible because
796 * some ext2 macro-instructions depend on its value
797 */
798 es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
799 sbi->s_es = es;
800 sb->s_magic = le16_to_cpu(es->s_magic);
801
802 if (sb->s_magic != EXT2_SUPER_MAGIC)
803 goto cantfind_ext2;
804
805 /* Set defaults before we parse the mount options */
806 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
807 if (def_mount_opts & EXT2_DEFM_DEBUG)
808 set_opt(sbi->s_mount_opt, DEBUG);
809 if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
810 set_opt(sbi->s_mount_opt, GRPID);
811 if (def_mount_opts & EXT2_DEFM_UID16)
812 set_opt(sbi->s_mount_opt, NO_UID32);
813 #ifdef CONFIG_EXT2_FS_XATTR
814 if (def_mount_opts & EXT2_DEFM_XATTR_USER)
815 set_opt(sbi->s_mount_opt, XATTR_USER);
816 #endif
817 #ifdef CONFIG_EXT2_FS_POSIX_ACL
818 if (def_mount_opts & EXT2_DEFM_ACL)
819 set_opt(sbi->s_mount_opt, POSIX_ACL);
820 #endif
821
822 if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
823 set_opt(sbi->s_mount_opt, ERRORS_PANIC);
824 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
825 set_opt(sbi->s_mount_opt, ERRORS_CONT);
826 else
827 set_opt(sbi->s_mount_opt, ERRORS_RO);
828
829 sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
830 sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
831
832 set_opt(sbi->s_mount_opt, RESERVATION);
833
834 if (!parse_options((char *) data, sb))
835 goto failed_mount;
836
837 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
838 ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
839 MS_POSIXACL : 0);
840
841 ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
842 EXT2_MOUNT_XIP if not */
843
844 if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
845 (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
846 EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
847 EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
848 ext2_msg(sb, KERN_WARNING,
849 "warning: feature flags set on rev 0 fs, "
850 "running e2fsck is recommended");
851 /*
852 * Check feature flags regardless of the revision level, since we
853 * previously didn't change the revision level when setting the flags,
854 * so there is a chance incompat flags are set on a rev 0 filesystem.
855 */
856 features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
857 if (features) {
858 ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
859 "unsupported optional features (%x)",
860 le32_to_cpu(features));
861 goto failed_mount;
862 }
863 if (!(sb->s_flags & MS_RDONLY) &&
864 (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
865 ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
866 "unsupported optional features (%x)",
867 le32_to_cpu(features));
868 goto failed_mount;
869 }
870
871 blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
872
873 if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
874 if (!silent)
875 ext2_msg(sb, KERN_ERR,
876 "error: unsupported blocksize for xip");
877 goto failed_mount;
878 }
879
880 /* If the blocksize doesn't match, re-read the thing.. */
881 if (sb->s_blocksize != blocksize) {
882 brelse(bh);
883
884 if (!sb_set_blocksize(sb, blocksize)) {
885 ext2_msg(sb, KERN_ERR, "error: blocksize is too small");
886 goto failed_sbi;
887 }
888
889 logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
890 offset = (sb_block*BLOCK_SIZE) % blocksize;
891 bh = sb_bread(sb, logic_sb_block);
892 if(!bh) {
893 			ext2_msg(sb, KERN_ERR, "error: couldn't read "
894 				"superblock on 2nd try");
895 goto failed_sbi;
896 }
897 es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
898 sbi->s_es = es;
899 if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
900 ext2_msg(sb, KERN_ERR, "error: magic mismatch");
901 goto failed_mount;
902 }
903 }
904
905 sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
906
907 if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
908 sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
909 sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
910 } else {
911 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
912 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
913 if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
914 !is_power_of_2(sbi->s_inode_size) ||
915 (sbi->s_inode_size > blocksize)) {
916 ext2_msg(sb, KERN_ERR,
917 "error: unsupported inode size: %d",
918 sbi->s_inode_size);
919 goto failed_mount;
920 }
921 }
922
923 sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
924 le32_to_cpu(es->s_log_frag_size);
925 if (sbi->s_frag_size == 0)
926 goto cantfind_ext2;
927 sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
928
929 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
930 sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
931 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
932
933 if (EXT2_INODE_SIZE(sb) == 0)
934 goto cantfind_ext2;
935 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
936 if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
937 goto cantfind_ext2;
938 sbi->s_itb_per_group = sbi->s_inodes_per_group /
939 sbi->s_inodes_per_block;
940 sbi->s_desc_per_block = sb->s_blocksize /
941 sizeof (struct ext2_group_desc);
942 sbi->s_sbh = bh;
943 sbi->s_mount_state = le16_to_cpu(es->s_state);
944 sbi->s_addr_per_block_bits =
945 ilog2 (EXT2_ADDR_PER_BLOCK(sb));
946 sbi->s_desc_per_block_bits =
947 ilog2 (EXT2_DESC_PER_BLOCK(sb));
948
949 if (sb->s_magic != EXT2_SUPER_MAGIC)
950 goto cantfind_ext2;
951
952 if (sb->s_blocksize != bh->b_size) {
953 if (!silent)
954 ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
955 goto failed_mount;
956 }
957
958 if (sb->s_blocksize != sbi->s_frag_size) {
959 		ext2_msg(sb, KERN_ERR,
960 			"error: fragsize %lu != blocksize %lu "
961 			"(not supported yet)",
962 sbi->s_frag_size, sb->s_blocksize);
963 goto failed_mount;
964 }
965
966 if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
967 ext2_msg(sb, KERN_ERR,
968 "error: #blocks per group too big: %lu",
969 sbi->s_blocks_per_group);
970 goto failed_mount;
971 }
972 if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
973 ext2_msg(sb, KERN_ERR,
974 "error: #fragments per group too big: %lu",
975 sbi->s_frags_per_group);
976 goto failed_mount;
977 }
978 if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
979 ext2_msg(sb, KERN_ERR,
980 "error: #inodes per group too big: %lu",
981 sbi->s_inodes_per_group);
982 goto failed_mount;
983 }
984
985 if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
986 goto cantfind_ext2;
987 sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
988 le32_to_cpu(es->s_first_data_block) - 1)
989 / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
990 db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
991 EXT2_DESC_PER_BLOCK(sb);
992 sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
993 if (sbi->s_group_desc == NULL) {
994 ext2_msg(sb, KERN_ERR, "error: not enough memory");
995 goto failed_mount;
996 }
997 bgl_lock_init(sbi->s_blockgroup_lock);
998 sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
999 if (!sbi->s_debts) {
1000 ext2_msg(sb, KERN_ERR, "error: not enough memory");
1001 goto failed_mount_group_desc;
1002 }
1003 for (i = 0; i < db_count; i++) {
1004 block = descriptor_loc(sb, logic_sb_block, i);
1005 sbi->s_group_desc[i] = sb_bread(sb, block);
1006 if (!sbi->s_group_desc[i]) {
1007 for (j = 0; j < i; j++)
1008 brelse (sbi->s_group_desc[j]);
1009 ext2_msg(sb, KERN_ERR,
1010 "error: unable to read group descriptors");
1011 goto failed_mount_group_desc;
1012 }
1013 }
1014 if (!ext2_check_descriptors (sb)) {
1015 ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
1016 goto failed_mount2;
1017 }
1018 sbi->s_gdb_count = db_count;
1019 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
1020 spin_lock_init(&sbi->s_next_gen_lock);
1021
1022 	/* per filesystem reservation list head & lock */
1023 spin_lock_init(&sbi->s_rsv_window_lock);
1024 sbi->s_rsv_window_root = RB_ROOT;
1025 /*
1026 * Add a single, static dummy reservation to the start of the
1027 * reservation window list --- it gives us a placeholder for
1028 * append-at-start-of-list which makes the allocation logic
1029 * _much_ simpler.
1030 */
1031 sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
1032 sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
1033 sbi->s_rsv_window_head.rsv_alloc_hit = 0;
1034 sbi->s_rsv_window_head.rsv_goal_size = 0;
1035 ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
1036
1037 err = percpu_counter_init(&sbi->s_freeblocks_counter,
1038 ext2_count_free_blocks(sb));
1039 if (!err) {
1040 err = percpu_counter_init(&sbi->s_freeinodes_counter,
1041 ext2_count_free_inodes(sb));
1042 }
1043 if (!err) {
1044 err = percpu_counter_init(&sbi->s_dirs_counter,
1045 ext2_count_dirs(sb));
1046 }
1047 if (err) {
1048 ext2_msg(sb, KERN_ERR, "error: insufficient memory");
1049 goto failed_mount3;
1050 }
1051 /*
1052 * set up enough so that it can read an inode
1053 */
1054 sb->s_op = &ext2_sops;
1055 sb->s_export_op = &ext2_export_ops;
1056 sb->s_xattr = ext2_xattr_handlers;
1057
1058 #ifdef CONFIG_QUOTA
1059 sb->dq_op = &dquot_operations;
1060 sb->s_qcop = &dquot_quotactl_ops;
1061 #endif
1062
1063 root = ext2_iget(sb, EXT2_ROOT_INO);
1064 if (IS_ERR(root)) {
1065 ret = PTR_ERR(root);
1066 goto failed_mount3;
1067 }
1068 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
1069 iput(root);
1070 ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
1071 goto failed_mount3;
1072 }
1073
1074 sb->s_root = d_alloc_root(root);
1075 if (!sb->s_root) {
1076 iput(root);
1077 ext2_msg(sb, KERN_ERR, "error: get root inode failed");
1078 ret = -ENOMEM;
1079 goto failed_mount3;
1080 }
1081 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
1082 ext2_msg(sb, KERN_WARNING,
1083 "warning: mounting ext3 filesystem as ext2");
1084 if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
1085 sb->s_flags |= MS_RDONLY;
1086 ext2_write_super(sb);
1087 return 0;
1088
1089 cantfind_ext2:
1090 if (!silent)
1091 ext2_msg(sb, KERN_ERR,
1092 "error: can't find an ext2 filesystem on dev %s.",
1093 sb->s_id);
1094 goto failed_mount;
1095 failed_mount3:
1096 percpu_counter_destroy(&sbi->s_freeblocks_counter);
1097 percpu_counter_destroy(&sbi->s_freeinodes_counter);
1098 percpu_counter_destroy(&sbi->s_dirs_counter);
1099 failed_mount2:
1100 for (i = 0; i < db_count; i++)
1101 brelse(sbi->s_group_desc[i]);
1102 failed_mount_group_desc:
1103 kfree(sbi->s_group_desc);
1104 kfree(sbi->s_debts);
1105 failed_mount:
1106 brelse(bh);
1107 failed_sbi:
1108 sb->s_fs_info = NULL;
1109 kfree(sbi->s_blockgroup_lock);
1110 kfree(sbi);
1111 failed_unlock:
1112 return ret;
1113 }
1114
1115 static void ext2_clear_super_error(struct super_block *sb)
1116 {
1117 struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
1118
1119 if (buffer_write_io_error(sbh)) {
1120 /*
1121 * Oh, dear. A previous attempt to write the
1122 * superblock failed. This could happen because the
1123 * USB device was yanked out. Or it could happen to
1124 * be a transient write error and maybe the block will
1125 * be remapped. Nothing we can do but to retry the
1126 * write and hope for the best.
1127 */
1128 		ext2_msg(sb, KERN_ERR,
1129 			"previous I/O error to superblock detected");
1130 clear_buffer_write_io_error(sbh);
1131 set_buffer_uptodate(sbh);
1132 }
1133 }
1134
1135 static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
1136 int wait)
1137 {
1138 ext2_clear_super_error(sb);
1139 spin_lock(&EXT2_SB(sb)->s_lock);
1140 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
1141 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
1142 es->s_wtime = cpu_to_le32(get_seconds());
1143 /* unlock before we do IO */
1144 spin_unlock(&EXT2_SB(sb)->s_lock);
1145 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1146 if (wait)
1147 sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
1148 sb->s_dirt = 0;
1149 }
1150
1151 /*
1152 * In the second extended file system, it is not necessary to
1153 * write the super block since we use a mapping of the
1154 * disk super block in a buffer.
1155 *
1156 * However, this function is still used to set the fs valid
1157  * flag to 0. We need to set this flag to 0 since the fs
1158 * may have been checked while mounted and e2fsck may have
1159 * set s_state to EXT2_VALID_FS after some corrections.
1160 */
1161 static int ext2_sync_fs(struct super_block *sb, int wait)
1162 {
1163 struct ext2_sb_info *sbi = EXT2_SB(sb);
1164 struct ext2_super_block *es = EXT2_SB(sb)->s_es;
1165
1166 spin_lock(&sbi->s_lock);
1167 if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
1168 ext2_debug("setting valid to 0\n");
1169 es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
1170 }
1171 spin_unlock(&sbi->s_lock);
1172 ext2_sync_super(sb, es, wait);
1173 return 0;
1174 }
1175
1176
1177 void ext2_write_super(struct super_block *sb)
1178 {
1179 if (!(sb->s_flags & MS_RDONLY))
1180 ext2_sync_fs(sb, 1);
1181 else
1182 sb->s_dirt = 0;
1183 }
1184
1185 static int ext2_remount (struct super_block * sb, int * flags, char * data)
1186 {
1187 struct ext2_sb_info * sbi = EXT2_SB(sb);
1188 struct ext2_super_block * es;
1189 unsigned long old_mount_opt = sbi->s_mount_opt;
1190 struct ext2_mount_options old_opts;
1191 unsigned long old_sb_flags;
1192 int err;
1193
1194 spin_lock(&sbi->s_lock);
1195
1196 /* Store the old options */
1197 old_sb_flags = sb->s_flags;
1198 old_opts.s_mount_opt = sbi->s_mount_opt;
1199 old_opts.s_resuid = sbi->s_resuid;
1200 old_opts.s_resgid = sbi->s_resgid;
1201
1202 /*
1203 * Allow the "check" option to be passed as a remount option.
1204 */
1205 if (!parse_options(data, sb)) {
1206 err = -EINVAL;
1207 goto restore_opts;
1208 }
1209
1210 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
1211 ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
1212
1213 ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
1214 EXT2_MOUNT_XIP if not */
1215
1216 if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) {
1217 ext2_msg(sb, KERN_WARNING,
1218 "warning: unsupported blocksize for xip");
1219 err = -EINVAL;
1220 goto restore_opts;
1221 }
1222
1223 es = sbi->s_es;
1224 if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) {
1225 ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
1226 "xip flag with busy inodes while remounting");
1227 sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
1228 sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
1229 }
1230 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
1231 spin_unlock(&sbi->s_lock);
1232 return 0;
1233 }
1234 if (*flags & MS_RDONLY) {
1235 if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
1236 !(sbi->s_mount_state & EXT2_VALID_FS)) {
1237 spin_unlock(&sbi->s_lock);
1238 return 0;
1239 }
1240
1241 /*
1242 * OK, we are remounting a valid rw partition rdonly, so set
1243 * the rdonly flag and then mark the partition as valid again.
1244 */
1245 es->s_state = cpu_to_le16(sbi->s_mount_state);
1246 es->s_mtime = cpu_to_le32(get_seconds());
1247 spin_unlock(&sbi->s_lock);
1248
1249 err = dquot_suspend(sb, -1);
1250 if (err < 0) {
1251 spin_lock(&sbi->s_lock);
1252 goto restore_opts;
1253 }
1254
1255 ext2_sync_super(sb, es, 1);
1256 } else {
1257 __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
1258 ~EXT2_FEATURE_RO_COMPAT_SUPP);
1259 if (ret) {
1260 ext2_msg(sb, KERN_WARNING,
1261 "warning: couldn't remount RDWR because of "
1262 "unsupported optional features (%x).",
1263 le32_to_cpu(ret));
1264 err = -EROFS;
1265 goto restore_opts;
1266 }
1267 /*
1268 * Mounting a RDONLY partition read-write, so reread and
1269 * store the current valid flag. (It may have been changed
1270 * by e2fsck since we originally mounted the partition.)
1271 */
1272 sbi->s_mount_state = le16_to_cpu(es->s_state);
1273 if (!ext2_setup_super (sb, es, 0))
1274 sb->s_flags &= ~MS_RDONLY;
1275 spin_unlock(&sbi->s_lock);
1276
1277 ext2_write_super(sb);
1278
1279 dquot_resume(sb, -1);
1280 }
1281
1282 return 0;
1283 restore_opts:
1284 sbi->s_mount_opt = old_opts.s_mount_opt;
1285 sbi->s_resuid = old_opts.s_resuid;
1286 sbi->s_resgid = old_opts.s_resgid;
1287 sb->s_flags = old_sb_flags;
1288 spin_unlock(&sbi->s_lock);
1289 return err;
1290 }
1291
1292 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1293 {
1294 struct super_block *sb = dentry->d_sb;
1295 struct ext2_sb_info *sbi = EXT2_SB(sb);
1296 struct ext2_super_block *es = sbi->s_es;
1297 u64 fsid;
1298
1299 spin_lock(&sbi->s_lock);
1300
1301 if (test_opt (sb, MINIX_DF))
1302 sbi->s_overhead_last = 0;
1303 else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
1304 unsigned long i, overhead = 0;
1305 smp_rmb();
1306
1307 /*
1308 * Compute the overhead (FS structures). This is constant
1309 * for a given filesystem unless the number of block groups
1310 * changes so we cache the previous value until it does.
1311 */
1312
1313 /*
1314 * All of the blocks before first_data_block are
1315 * overhead
1316 */
1317 overhead = le32_to_cpu(es->s_first_data_block);
1318
1319 /*
1320 * Add the overhead attributed to the superblock and
1321 * block group descriptors. If the sparse superblocks
1322 * feature is turned on, then not all groups have this.
1323 */
1324 for (i = 0; i < sbi->s_groups_count; i++)
1325 overhead += ext2_bg_has_super(sb, i) +
1326 ext2_bg_num_gdb(sb, i);
1327
1328 /*
1329 * Every block group has an inode bitmap, a block
1330 * bitmap, and an inode table.
1331 */
1332 overhead += (sbi->s_groups_count *
1333 (2 + sbi->s_itb_per_group));
1334 sbi->s_overhead_last = overhead;
1335 smp_wmb();
1336 sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
1337 }
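/*
 * Illustrative only: for a hypothetical one-group filesystem with 4KiB
 * blocks, one descriptor block and 512 inode-table blocks per group, the
 * overhead computed above would be 0 (first_data_block) + (1 + 1)
 * (superblock + descriptor) + (2 + 512) (bitmaps + inode table) = 516
 * blocks, and that value is cached in s_overhead_last.
 */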
1338
1339 buf->f_type = EXT2_SUPER_MAGIC;
1340 buf->f_bsize = sb->s_blocksize;
1341 buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
1342 buf->f_bfree = ext2_count_free_blocks(sb);
1343 es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
1344 buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
1345 if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
1346 buf->f_bavail = 0;
1347 buf->f_files = le32_to_cpu(es->s_inodes_count);
1348 buf->f_ffree = ext2_count_free_inodes(sb);
1349 es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
1350 buf->f_namelen = EXT2_NAME_LEN;
1351 fsid = le64_to_cpup((void *)es->s_uuid) ^
1352 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
1353 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
1354 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
1355 spin_unlock(&sbi->s_lock);
1356 return 0;
1357 }
1358
1359 static int ext2_get_sb(struct file_system_type *fs_type,
1360 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
1361 {
1362 return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super, mnt);
1363 }
1364
1365 #ifdef CONFIG_QUOTA
1366
1367 /* Read data from quotafile - avoid pagecache and such because we cannot afford
1368 * acquiring the locks... As quota files are never truncated and quota code
1369  * itself serializes the operations (and no one else should touch the files)
1370 * we don't have to be afraid of races */
1371 static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
1372 size_t len, loff_t off)
1373 {
1374 struct inode *inode = sb_dqopt(sb)->files[type];
1375 sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
1376 int err = 0;
1377 int offset = off & (sb->s_blocksize - 1);
1378 int tocopy;
1379 size_t toread;
1380 struct buffer_head tmp_bh;
1381 struct buffer_head *bh;
1382 loff_t i_size = i_size_read(inode);
1383
1384 if (off > i_size)
1385 return 0;
1386 if (off+len > i_size)
1387 len = i_size-off;
1388 toread = len;
1389 while (toread > 0) {
1390 tocopy = sb->s_blocksize - offset < toread ?
1391 sb->s_blocksize - offset : toread;
1392
1393 tmp_bh.b_state = 0;
1394 tmp_bh.b_size = sb->s_blocksize;
1395 err = ext2_get_block(inode, blk, &tmp_bh, 0);
1396 if (err < 0)
1397 return err;
1398 if (!buffer_mapped(&tmp_bh)) /* A hole? */
1399 memset(data, 0, tocopy);
1400 else {
1401 bh = sb_bread(sb, tmp_bh.b_blocknr);
1402 if (!bh)
1403 return -EIO;
1404 memcpy(data, bh->b_data+offset, tocopy);
1405 brelse(bh);
1406 }
1407 offset = 0;
1408 toread -= tocopy;
1409 data += tocopy;
1410 blk++;
1411 }
1412 return len;
1413 }
1414
1415 /* Write to quotafile */
1416 static ssize_t ext2_quota_write(struct super_block *sb, int type,
1417 const char *data, size_t len, loff_t off)
1418 {
1419 struct inode *inode = sb_dqopt(sb)->files[type];
1420 sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
1421 int err = 0;
1422 int offset = off & (sb->s_blocksize - 1);
1423 int tocopy;
1424 size_t towrite = len;
1425 struct buffer_head tmp_bh;
1426 struct buffer_head *bh;
1427
1428 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
1429 while (towrite > 0) {
1430 tocopy = sb->s_blocksize - offset < towrite ?
1431 sb->s_blocksize - offset : towrite;
1432
1433 tmp_bh.b_state = 0;
1434 err = ext2_get_block(inode, blk, &tmp_bh, 1);
1435 if (err < 0)
1436 goto out;
1437 if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
1438 bh = sb_bread(sb, tmp_bh.b_blocknr);
1439 else
1440 bh = sb_getblk(sb, tmp_bh.b_blocknr);
1441 if (!bh) {
1442 err = -EIO;
1443 goto out;
1444 }
1445 lock_buffer(bh);
1446 memcpy(bh->b_data+offset, data, tocopy);
1447 flush_dcache_page(bh->b_page);
1448 set_buffer_uptodate(bh);
1449 mark_buffer_dirty(bh);
1450 unlock_buffer(bh);
1451 brelse(bh);
1452 offset = 0;
1453 towrite -= tocopy;
1454 data += tocopy;
1455 blk++;
1456 }
1457 out:
1458 if (len == towrite) {
1459 mutex_unlock(&inode->i_mutex);
1460 return err;
1461 }
1462 if (inode->i_size < off+len-towrite)
1463 i_size_write(inode, off+len-towrite);
1464 inode->i_version++;
1465 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1466 mark_inode_dirty(inode);
1467 mutex_unlock(&inode->i_mutex);
1468 return len - towrite;
1469 }
1470
1471 #endif
1472
1473 static struct file_system_type ext2_fs_type = {
1474 .owner = THIS_MODULE,
1475 .name = "ext2",
1476 .get_sb = ext2_get_sb,
1477 .kill_sb = kill_block_super,
1478 .fs_flags = FS_REQUIRES_DEV,
1479 };
1480
1481 static int __init init_ext2_fs(void)
1482 {
1483 int err = init_ext2_xattr();
1484 if (err)
1485 return err;
1486 err = init_inodecache();
1487 if (err)
1488 goto out1;
1489 err = register_filesystem(&ext2_fs_type);
1490 if (err)
1491 goto out;
1492 return 0;
1493 out:
1494 destroy_inodecache();
1495 out1:
1496 exit_ext2_xattr();
1497 return err;
1498 }
1499
1500 static void __exit exit_ext2_fs(void)
1501 {
1502 unregister_filesystem(&ext2_fs_type);
1503 destroy_inodecache();
1504 exit_ext2_xattr();
1505 }
1506
1507 module_init(init_ext2_fs)
1508 module_exit(exit_ext2_fs)