/*
 *  linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>

static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_destroy_inode(struct inode *inode);

#include "hfsplus_fs.h"
#include "xattr.h"

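/*
 * Read one of the special metadata inodes (extents, catalog, allocation,
 * startup or attributes file).  Their fork data lives in the volume header
 * rather than in the catalog, so the fork is taken straight from s_vhdr.
 */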
static int hfsplus_system_read_inode(struct inode *inode)
{
        struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->ext_file);
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
                break;
        case HFSPLUS_CAT_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->cat_file);
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
                break;
        case HFSPLUS_ALLOC_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
                inode->i_mapping->a_ops = &hfsplus_aops;
                break;
        case HFSPLUS_START_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->start_file);
                break;
        case HFSPLUS_ATTR_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->attr_file);
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
                break;
        default:
                return -EIO;
        }

        return 0;
}

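/*
 * Look up an in-core inode by catalog node ID.  User files and the root
 * directory are read from the catalog B-tree; the reserved system CNIDs
 * are read from the volume header instead.
 */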
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
        struct hfs_find_data fd;
        struct inode *inode;
        int err;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
        mutex_init(&HFSPLUS_I(inode)->extents_lock);
        HFSPLUS_I(inode)->flags = 0;
        HFSPLUS_I(inode)->extent_state = 0;
        HFSPLUS_I(inode)->rsrc_inode = NULL;
        atomic_set(&HFSPLUS_I(inode)->opencnt, 0);

        if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
            inode->i_ino == HFSPLUS_ROOT_CNID) {
                err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
                if (!err) {
                        err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
                        if (!err)
                                err = hfsplus_cat_read_inode(inode, &fd);
                        hfs_find_exit(&fd);
                }
        } else {
                err = hfsplus_system_read_inode(inode);
        }

        if (err) {
                iget_failed(inode);
                return ERR_PTR(err);
        }

        unlock_new_inode(inode);
        return inode;
}

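/*
 * Write a special metadata inode back.  The fork is copied into the in-core
 * volume header; if the fork size changed, the backup volume header is
 * scheduled for rewrite as well.  Inodes backing a B-tree also flush that
 * tree here.
 */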
static int hfsplus_system_write_inode(struct inode *inode)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        struct hfsplus_vh *vhdr = sbi->s_vhdr;
        struct hfsplus_fork_raw *fork;
        struct hfs_btree *tree = NULL;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                fork = &vhdr->ext_file;
                tree = sbi->ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
                fork = &vhdr->cat_file;
                tree = sbi->cat_tree;
                break;
        case HFSPLUS_ALLOC_CNID:
                fork = &vhdr->alloc_file;
                break;
        case HFSPLUS_START_CNID:
                fork = &vhdr->start_file;
                break;
        case HFSPLUS_ATTR_CNID:
                fork = &vhdr->attr_file;
                tree = sbi->attr_tree;
                break;
        default:
                return -EIO;
        }

        if (fork->total_size != cpu_to_be64(inode->i_size)) {
                set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
                hfsplus_mark_mdb_dirty(inode->i_sb);
        }
        hfsplus_inode_write_fork(inode, fork);
        if (tree) {
                int err = hfs_btree_write(tree);

                if (err) {
                        pr_err("b-tree write err: %d, ino %lu\n",
                               err, inode->i_ino);
                        return err;
                }
        }
        return 0;
}

static int hfsplus_write_inode(struct inode *inode,
                               struct writeback_control *wbc)
{
        int err;

        hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);

        err = hfsplus_ext_write_extent(inode);
        if (err)
                return err;

        if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
            inode->i_ino == HFSPLUS_ROOT_CNID)
                return hfsplus_cat_write_inode(inode);
        else
                return hfsplus_system_write_inode(inode);
}

static void hfsplus_evict_inode(struct inode *inode)
{
        hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        if (HFSPLUS_IS_RSRC(inode)) {
                HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
                iput(HFSPLUS_I(inode)->rsrc_inode);
        }
}

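/*
 * Synchronize the whole filesystem: flush the B-tree and allocation file
 * mappings, refresh the counters in the volume header, write the primary
 * (and, when marked, the backup) volume header, and finally issue a cache
 * flush to the device unless barriers are disabled.
 */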
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct hfsplus_vh *vhdr = sbi->s_vhdr;
        int write_backup = 0;
        int error, error2;

        if (!wait)
                return 0;

        hfs_dbg(SUPER, "hfsplus_sync_fs\n");

        /*
         * Explicitly write out the special metadata inodes.
         *
         * While these special inodes are marked as hashed and written
         * out periodically by the flusher threads we redirty them
         * during writeout of normal inodes, and thus the livelock
         * prevents us from getting the latest state to disk.
         */
        error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
        error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
        if (!error)
                error = error2;
        if (sbi->attr_tree) {
                error2 =
                    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
                if (!error)
                        error = error2;
        }
        error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
        if (!error)
                error = error2;

        mutex_lock(&sbi->vh_mutex);
        mutex_lock(&sbi->alloc_mutex);
        vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
        vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
        vhdr->folder_count = cpu_to_be32(sbi->folder_count);
        vhdr->file_count = cpu_to_be32(sbi->file_count);

        if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
                memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
                write_backup = 1;
        }

        error2 = hfsplus_submit_bio(sb,
                                   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
                                   sbi->s_vhdr_buf, NULL, WRITE_SYNC);
        if (!error)
                error = error2;
        if (!write_backup)
                goto out;

        error2 = hfsplus_submit_bio(sb,
                                   sbi->part_start + sbi->sect_count - 2,
                                   sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
        if (!error)
                error = error2;
out:
        mutex_unlock(&sbi->alloc_mutex);
        mutex_unlock(&sbi->vh_mutex);

        if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
                blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

        return error;
}

static void delayed_sync_fs(struct work_struct *work)
{
        int err;
        struct hfsplus_sb_info *sbi;

        sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);

        spin_lock(&sbi->work_lock);
        sbi->work_queued = 0;
        spin_unlock(&sbi->work_lock);

        err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
        if (err)
                pr_err("delayed sync fs err %d\n", err);
}

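/*
 * Mark the volume header dirty and schedule a delayed sync on the
 * system_long_wq.  Repeated calls while a sync is already queued are
 * coalesced via the work_queued flag.
 */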
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        unsigned long delay;

        if (sb->s_flags & MS_RDONLY)
                return;

        spin_lock(&sbi->work_lock);
        if (!sbi->work_queued) {
                delay = msecs_to_jiffies(dirty_writeback_interval * 10);
                queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
                sbi->work_queued = 1;
        }
        spin_unlock(&sbi->work_lock);
}

static void hfsplus_put_super(struct super_block *sb)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

        hfs_dbg(SUPER, "hfsplus_put_super\n");

        cancel_delayed_work_sync(&sbi->sync_work);

        if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
                struct hfsplus_vh *vhdr = sbi->s_vhdr;

                vhdr->modify_date = hfsp_now2mt();
                vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
                vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

                hfsplus_sync_fs(sb, 1);
        }

        hfs_btree_close(sbi->attr_tree);
        hfs_btree_close(sbi->cat_tree);
        hfs_btree_close(sbi->ext_tree);
        iput(sbi->alloc_file);
        iput(sbi->hidden_dir);
        kfree(sbi->s_vhdr_buf);
        kfree(sbi->s_backup_vhdr_buf);
        unload_nls(sbi->nls);
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
}

static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

        buf->f_type = HFSPLUS_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
        buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
        buf->f_bavail = buf->f_bfree;
        buf->f_files = 0xFFFFFFFF;
        buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
        buf->f_namelen = HFSPLUS_MAX_STRLEN;

        return 0;
}

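/*
 * Handle remount.  A switch to read-write is refused (the volume stays
 * read-only) if the volume was not cleanly unmounted, is software locked,
 * or is journaled, unless the "force" mount option is given.
 */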
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
        sync_filesystem(sb);
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
                return 0;
        if (!(*flags & MS_RDONLY)) {
                struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
                int force = 0;

                if (!hfsplus_parse_options_remount(data, &force))
                        return -EINVAL;

                if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                } else if (force) {
                        /* nothing */
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
                        pr_warn("filesystem is marked journaled, leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                }
        }
        return 0;
}

static const struct super_operations hfsplus_sops = {
        .alloc_inode    = hfsplus_alloc_inode,
        .destroy_inode  = hfsplus_destroy_inode,
        .write_inode    = hfsplus_write_inode,
        .evict_inode    = hfsplus_evict_inode,
        .put_super      = hfsplus_put_super,
        .sync_fs        = hfsplus_sync_fs,
        .statfs         = hfsplus_statfs,
        .remount_fs     = hfsplus_remount,
        .show_options   = hfsplus_show_options,
};

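/*
 * Mount-time setup: read and validate the volume header, check the
 * filesystem geometry, open the metadata B-trees and the allocation file,
 * load the root directory and the HFS+ private ("hidden") directory, and
 * mark the volume as mounted and inconsistent until the next clean unmount.
 */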
static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
{
        struct hfsplus_vh *vhdr;
        struct hfsplus_sb_info *sbi;
        hfsplus_cat_entry entry;
        struct hfs_find_data fd;
        struct inode *root, *inode;
        struct qstr str;
        struct nls_table *nls = NULL;
        u64 last_fs_block, last_fs_page;
        int err;

        err = -ENOMEM;
        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                goto out;

        sb->s_fs_info = sbi;
        mutex_init(&sbi->alloc_mutex);
        mutex_init(&sbi->vh_mutex);
        spin_lock_init(&sbi->work_lock);
        INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
        hfsplus_fill_defaults(sbi);

        err = -EINVAL;
        if (!hfsplus_parse_options(data, sbi)) {
                pr_err("unable to parse mount options\n");
                goto out_unload_nls;
        }

        /* temporarily use utf8 to correctly find the hidden dir below */
        nls = sbi->nls;
        sbi->nls = load_nls("utf8");
        if (!sbi->nls) {
                pr_err("unable to load nls for utf8\n");
                goto out_unload_nls;
        }

        /* Grab the volume header */
        if (hfsplus_read_wrapper(sb)) {
                if (!silent)
                        pr_warn("unable to find HFS+ superblock\n");
                goto out_unload_nls;
        }
        vhdr = sbi->s_vhdr;

        /* Copy parts of the volume header into the superblock */
        sb->s_magic = HFSPLUS_VOLHEAD_SIG;
        if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
            be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
                pr_err("wrong filesystem version\n");
                goto out_free_vhdr;
        }
        sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
        sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
        sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
        sbi->file_count = be32_to_cpu(vhdr->file_count);
        sbi->folder_count = be32_to_cpu(vhdr->folder_count);
        sbi->data_clump_blocks =
                be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
        if (!sbi->data_clump_blocks)
                sbi->data_clump_blocks = 1;
        sbi->rsrc_clump_blocks =
                be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
        if (!sbi->rsrc_clump_blocks)
                sbi->rsrc_clump_blocks = 1;

        err = -EFBIG;
        last_fs_block = sbi->total_blocks - 1;
        last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
                        PAGE_CACHE_SHIFT;

        if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
            (last_fs_page > (pgoff_t)(~0ULL))) {
                pr_err("filesystem size too large\n");
                goto out_free_vhdr;
        }

        /* Set up operations so we can load metadata */
        sb->s_op = &hfsplus_sops;
        sb->s_maxbytes = MAX_LFS_FILESIZE;

        if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
                /* nothing */
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                pr_warn("Filesystem is marked locked, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
                        !(sb->s_flags & MS_RDONLY)) {
                pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        }

        err = -EINVAL;

        /* Load metadata objects (B*Trees) */
        sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
        if (!sbi->ext_tree) {
                pr_err("failed to load extents file\n");
                goto out_free_vhdr;
        }
        sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
        if (!sbi->cat_tree) {
                pr_err("failed to load catalog file\n");
                goto out_close_ext_tree;
        }
        atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
        if (vhdr->attr_file.total_blocks != 0) {
                sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
                if (!sbi->attr_tree) {
                        pr_err("failed to load attributes file\n");
                        goto out_close_cat_tree;
                }
                atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
        }
        sb->s_xattr = hfsplus_xattr_handlers;

        inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
        if (IS_ERR(inode)) {
                pr_err("failed to load allocation file\n");
                err = PTR_ERR(inode);
                goto out_close_attr_tree;
        }
        sbi->alloc_file = inode;

        /* Load the root directory */
        root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
        if (IS_ERR(root)) {
                pr_err("failed to load root directory\n");
                err = PTR_ERR(root);
                goto out_put_alloc_file;
        }

        sb->s_d_op = &hfsplus_dentry_operations;
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
                err = -ENOMEM;
                goto out_put_alloc_file;
        }

        str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
        str.name = HFSP_HIDDENDIR_NAME;
        err = hfs_find_init(sbi->cat_tree, &fd);
        if (err)
                goto out_put_root;
        err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
        if (unlikely(err < 0))
                goto out_put_root;
        if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
                hfs_find_exit(&fd);
                if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
                        goto out_put_root;
                inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto out_put_root;
                }
                sbi->hidden_dir = inode;
        } else
                hfs_find_exit(&fd);

        if (!(sb->s_flags & MS_RDONLY)) {
                /*
                 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
                 * all three are registered with Apple for our use
                 */
                vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
                vhdr->modify_date = hfsp_now2mt();
                be32_add_cpu(&vhdr->write_count, 1);
                vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
                vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
                hfsplus_sync_fs(sb, 1);

                if (!sbi->hidden_dir) {
                        mutex_lock(&sbi->vh_mutex);
                        sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
                        if (!sbi->hidden_dir) {
                                mutex_unlock(&sbi->vh_mutex);
                                err = -ENOMEM;
                                goto out_put_root;
                        }
                        err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
                                                 &str, sbi->hidden_dir);
                        if (err) {
                                mutex_unlock(&sbi->vh_mutex);
                                goto out_put_hidden_dir;
                        }

                        err = hfsplus_init_inode_security(sbi->hidden_dir,
                                                          root, &str);
                        if (err == -EOPNOTSUPP)
                                err = 0; /* Operation is not supported. */
                        else if (err) {
                                /*
                                 * Try to delete anyway without
                                 * error analysis.
                                 */
                                hfsplus_delete_cat(sbi->hidden_dir->i_ino,
                                                   root, &str);
                                mutex_unlock(&sbi->vh_mutex);
                                goto out_put_hidden_dir;
                        }

                        mutex_unlock(&sbi->vh_mutex);
                        hfsplus_mark_inode_dirty(sbi->hidden_dir,
                                                 HFSPLUS_I_CAT_DIRTY);
                }
        }

        unload_nls(sbi->nls);
        sbi->nls = nls;
        return 0;

out_put_hidden_dir:
        iput(sbi->hidden_dir);
out_put_root:
        dput(sb->s_root);
        sb->s_root = NULL;
out_put_alloc_file:
        iput(sbi->alloc_file);
out_close_attr_tree:
        hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
        hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
        hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
        kfree(sbi->s_vhdr_buf);
        kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
        unload_nls(sbi->nls);
        unload_nls(nls);
        kfree(sbi);
out:
        return err;
}

MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");

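/*
 * Inodes are allocated from a dedicated slab cache and freed through an
 * RCU callback so that lockless path walks can still safely dereference
 * an inode that is being destroyed.
 */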
static struct kmem_cache *hfsplus_inode_cachep;

static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
        struct hfsplus_inode_info *i;

        i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
        return i ? &i->vfs_inode : NULL;
}

static void hfsplus_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}

static void hfsplus_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, hfsplus_i_callback);
}

#define HFSPLUS_INODE_SIZE      sizeof(struct hfsplus_inode_info)

static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
                                    int flags, const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
}

static struct file_system_type hfsplus_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "hfsplus",
        .mount          = hfsplus_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfsplus");

static void hfsplus_init_once(void *p)
{
        struct hfsplus_inode_info *i = p;

        inode_init_once(&i->vfs_inode);
}

static int __init init_hfsplus_fs(void)
{
        int err;

        hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
                HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
                hfsplus_init_once);
        if (!hfsplus_inode_cachep)
                return -ENOMEM;
        err = hfsplus_create_attr_tree_cache();
        if (err)
                goto destroy_inode_cache;
        err = register_filesystem(&hfsplus_fs_type);
        if (err)
                goto destroy_attr_tree_cache;
        return 0;

destroy_attr_tree_cache:
        hfsplus_destroy_attr_tree_cache();

destroy_inode_cache:
        kmem_cache_destroy(hfsplus_inode_cachep);

        return err;
}

static void __exit exit_hfsplus_fs(void)
{
        unregister_filesystem(&hfsplus_fs_type);

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        hfsplus_destroy_attr_tree_cache();
        kmem_cache_destroy(hfsplus_inode_cachep);
}

module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)