/*
 * linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */
10
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
18
19static int hfsplus_readpage(struct file *file, struct page *page)
20{
1da177e4
LT
21 return block_read_full_page(page, hfsplus_get_block);
22}
23
24static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
25{
1da177e4
LT
26 return block_write_full_page(page, hfsplus_get_block, wbc);
27}
28
29static int hfsplus_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
30{
31 return cont_prepare_write(page, from, to, hfsplus_get_block,
32 &HFSPLUS_I(page->mapping->host).phys_size);
33}
34
35static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
36{
37 return generic_block_bmap(mapping, block, hfsplus_get_block);
38}
39
27496a8c 40static int hfsplus_releasepage(struct page *page, gfp_t mask)
1da177e4
LT
41{
42 struct inode *inode = page->mapping->host;
43 struct super_block *sb = inode->i_sb;
44 struct hfs_btree *tree;
45 struct hfs_bnode *node;
46 u32 nidx;
47 int i, res = 1;
48
49 switch (inode->i_ino) {
50 case HFSPLUS_EXT_CNID:
51 tree = HFSPLUS_SB(sb).ext_tree;
52 break;
53 case HFSPLUS_CAT_CNID:
54 tree = HFSPLUS_SB(sb).cat_tree;
55 break;
56 case HFSPLUS_ATTR_CNID:
57 tree = HFSPLUS_SB(sb).attr_tree;
58 break;
59 default:
60 BUG();
61 return 0;
62 }
63 if (tree->node_size >= PAGE_CACHE_SIZE) {
64 nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
65 spin_lock(&tree->hash_lock);
66 node = hfs_bnode_findhash(tree, nidx);
67 if (!node)
68 ;
69 else if (atomic_read(&node->refcnt))
70 res = 0;
71 if (res && node) {
72 hfs_bnode_unhash(node);
73 hfs_bnode_free(node);
74 }
75 spin_unlock(&tree->hash_lock);
76 } else {
77 nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
78 i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
79 spin_lock(&tree->hash_lock);
80 do {
81 node = hfs_bnode_findhash(tree, nidx++);
82 if (!node)
83 continue;
84 if (atomic_read(&node->refcnt)) {
85 res = 0;
86 break;
87 }
88 hfs_bnode_unhash(node);
89 hfs_bnode_free(node);
90 } while (--i && nidx < tree->node_count);
91 spin_unlock(&tree->hash_lock);
92 }
1da177e4
LT
93 return res ? try_to_free_buffers(page) : 0;
94}
95
96static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
97 struct buffer_head *bh_result, int create)
98{
99 int ret;
100
101 ret = hfsplus_get_block(inode, iblock, bh_result, create);
102 if (!ret)
103 bh_result->b_size = (1 << inode->i_blkbits);
104 return ret;
105}
106
107static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
108 const struct iovec *iov, loff_t offset, unsigned long nr_segs)
109{
110 struct file *file = iocb->ki_filp;
111 struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
112
113 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
114 offset, nr_segs, hfsplus_get_blocks, NULL);
115}
116
117static int hfsplus_writepages(struct address_space *mapping,
118 struct writeback_control *wbc)
119{
120 return mpage_writepages(mapping, wbc, hfsplus_get_block);
121}
122
123struct address_space_operations hfsplus_btree_aops = {
124 .readpage = hfsplus_readpage,
125 .writepage = hfsplus_writepage,
126 .sync_page = block_sync_page,
127 .prepare_write = hfsplus_prepare_write,
128 .commit_write = generic_commit_write,
129 .bmap = hfsplus_bmap,
130 .releasepage = hfsplus_releasepage,
131};
132
133struct address_space_operations hfsplus_aops = {
134 .readpage = hfsplus_readpage,
135 .writepage = hfsplus_writepage,
136 .sync_page = block_sync_page,
137 .prepare_write = hfsplus_prepare_write,
138 .commit_write = generic_commit_write,
139 .bmap = hfsplus_bmap,
140 .direct_IO = hfsplus_direct_IO,
141 .writepages = hfsplus_writepages,
142};
143
144static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
145 struct nameidata *nd)
146{
147 struct hfs_find_data fd;
148 struct super_block *sb = dir->i_sb;
149 struct inode *inode = NULL;
150 int err;
151
152 if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
153 goto out;
154
155 inode = HFSPLUS_I(dir).rsrc_inode;
156 if (inode)
157 goto out;
158
159 inode = new_inode(sb);
160 if (!inode)
161 return ERR_PTR(-ENOMEM);
162
163 inode->i_ino = dir->i_ino;
164 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
165 init_MUTEX(&HFSPLUS_I(inode).extents_lock);
166 HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;
167
168 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
169 err = hfsplus_find_cat(sb, dir->i_ino, &fd);
170 if (!err)
171 err = hfsplus_cat_read_inode(inode, &fd);
172 hfs_find_exit(&fd);
173 if (err) {
174 iput(inode);
175 return ERR_PTR(err);
176 }
177 HFSPLUS_I(inode).rsrc_inode = dir;
178 HFSPLUS_I(dir).rsrc_inode = inode;
179 igrab(dir);
180 hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
181 mark_inode_dirty(inode);
1da177e4
LT
182out:
183 d_add(dentry, inode);
184 return NULL;
185}
186
187static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
188{
189 struct super_block *sb = inode->i_sb;
190 u16 mode;
191
192 mode = be16_to_cpu(perms->mode);
193
194 inode->i_uid = be32_to_cpu(perms->owner);
195 if (!inode->i_uid && !mode)
196 inode->i_uid = HFSPLUS_SB(sb).uid;
197
198 inode->i_gid = be32_to_cpu(perms->group);
199 if (!inode->i_gid && !mode)
200 inode->i_gid = HFSPLUS_SB(sb).gid;
201
202 if (dir) {
203 mode = mode ? (mode & S_IALLUGO) :
204 (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
205 mode |= S_IFDIR;
206 } else if (!mode)
207 mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
208 ~(HFSPLUS_SB(sb).umask));
209 inode->i_mode = mode;
210
211 HFSPLUS_I(inode).rootflags = perms->rootflags;
212 HFSPLUS_I(inode).userflags = perms->userflags;
213 if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
214 inode->i_flags |= S_IMMUTABLE;
215 else
216 inode->i_flags &= ~S_IMMUTABLE;
217 if (perms->rootflags & HFSPLUS_FLG_APPEND)
218 inode->i_flags |= S_APPEND;
219 else
220 inode->i_flags &= ~S_APPEND;
221}
222
223static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
224{
225 if (inode->i_flags & S_IMMUTABLE)
226 perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
227 else
228 perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
229 if (inode->i_flags & S_APPEND)
230 perms->rootflags |= HFSPLUS_FLG_APPEND;
231 else
232 perms->rootflags &= ~HFSPLUS_FLG_APPEND;
233 perms->userflags = HFSPLUS_I(inode).userflags;
234 perms->mode = cpu_to_be16(inode->i_mode);
235 perms->owner = cpu_to_be32(inode->i_uid);
236 perms->group = cpu_to_be32(inode->i_gid);
237 perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
238}
239
240static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
241{
242 /* MAY_EXEC is also used for lookup, if no x bit is set allow lookup,
243 * open_exec has the same test, so it's still not executable, if a x bit
244 * is set fall back to standard permission check.
245 */
246 if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
247 return 0;
248 return generic_permission(inode, mask, NULL);
249}
250
251
252static int hfsplus_file_open(struct inode *inode, struct file *file)
253{
254 if (HFSPLUS_IS_RSRC(inode))
255 inode = HFSPLUS_I(inode).rsrc_inode;
256 if (atomic_read(&file->f_count) != 1)
257 return 0;
258 atomic_inc(&HFSPLUS_I(inode).opencnt);
259 return 0;
260}
261
262static int hfsplus_file_release(struct inode *inode, struct file *file)
263{
264 struct super_block *sb = inode->i_sb;
265
266 if (HFSPLUS_IS_RSRC(inode))
267 inode = HFSPLUS_I(inode).rsrc_inode;
268 if (atomic_read(&file->f_count) != 0)
269 return 0;
270 if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
1b1dcc1b 271 mutex_lock(&inode->i_mutex);
1da177e4
LT
272 hfsplus_file_truncate(inode);
273 if (inode->i_flags & S_DEAD) {
274 hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
275 hfsplus_delete_inode(inode);
276 }
1b1dcc1b 277 mutex_unlock(&inode->i_mutex);
1da177e4
LT
278 }
279 return 0;
280}
281
282extern struct inode_operations hfsplus_dir_inode_operations;
283extern struct file_operations hfsplus_dir_operations;
284
285static struct inode_operations hfsplus_file_inode_operations = {
286 .lookup = hfsplus_file_lookup,
287 .truncate = hfsplus_file_truncate,
288 .permission = hfsplus_permission,
289 .setxattr = hfsplus_setxattr,
290 .getxattr = hfsplus_getxattr,
291 .listxattr = hfsplus_listxattr,
292};
293
294static struct file_operations hfsplus_file_operations = {
295 .llseek = generic_file_llseek,
296 .read = generic_file_read,
297 .write = generic_file_write,
298 .mmap = generic_file_mmap,
299 .sendfile = generic_file_sendfile,
300 .fsync = file_fsync,
301 .open = hfsplus_file_open,
302 .release = hfsplus_file_release,
303 .ioctl = hfsplus_ioctl,
304};
305
306struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
307{
308 struct inode *inode = new_inode(sb);
309 if (!inode)
310 return NULL;
311
1da177e4
LT
312 inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
313 inode->i_mode = mode;
314 inode->i_uid = current->fsuid;
315 inode->i_gid = current->fsgid;
316 inode->i_nlink = 1;
317 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
318 inode->i_blksize = HFSPLUS_SB(sb).alloc_blksz;
319 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
320 init_MUTEX(&HFSPLUS_I(inode).extents_lock);
321 atomic_set(&HFSPLUS_I(inode).opencnt, 0);
322 HFSPLUS_I(inode).flags = 0;
323 memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
324 memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
325 HFSPLUS_I(inode).alloc_blocks = 0;
326 HFSPLUS_I(inode).first_blocks = 0;
327 HFSPLUS_I(inode).cached_start = 0;
328 HFSPLUS_I(inode).cached_blocks = 0;
329 HFSPLUS_I(inode).phys_size = 0;
330 HFSPLUS_I(inode).fs_blocks = 0;
331 HFSPLUS_I(inode).rsrc_inode = NULL;
332 if (S_ISDIR(inode->i_mode)) {
333 inode->i_size = 2;
334 HFSPLUS_SB(sb).folder_count++;
335 inode->i_op = &hfsplus_dir_inode_operations;
336 inode->i_fop = &hfsplus_dir_operations;
337 } else if (S_ISREG(inode->i_mode)) {
338 HFSPLUS_SB(sb).file_count++;
339 inode->i_op = &hfsplus_file_inode_operations;
340 inode->i_fop = &hfsplus_file_operations;
341 inode->i_mapping->a_ops = &hfsplus_aops;
342 HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
343 } else if (S_ISLNK(inode->i_mode)) {
344 HFSPLUS_SB(sb).file_count++;
345 inode->i_op = &page_symlink_inode_operations;
346 inode->i_mapping->a_ops = &hfsplus_aops;
347 HFSPLUS_I(inode).clump_blocks = 1;
348 } else
349 HFSPLUS_SB(sb).file_count++;
350 insert_inode_hash(inode);
351 mark_inode_dirty(inode);
352 sb->s_dirt = 1;
353
354 return inode;
355}
356
357void hfsplus_delete_inode(struct inode *inode)
358{
359 struct super_block *sb = inode->i_sb;
360
361 if (S_ISDIR(inode->i_mode)) {
362 HFSPLUS_SB(sb).folder_count--;
363 sb->s_dirt = 1;
364 return;
365 }
366 HFSPLUS_SB(sb).file_count--;
367 if (S_ISREG(inode->i_mode)) {
368 if (!inode->i_nlink) {
369 inode->i_size = 0;
370 hfsplus_file_truncate(inode);
371 }
372 } else if (S_ISLNK(inode->i_mode)) {
373 inode->i_size = 0;
374 hfsplus_file_truncate(inode);
375 }
376 sb->s_dirt = 1;
377}
378
379void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
380{
381 struct super_block *sb = inode->i_sb;
382 u32 count;
383 int i;
384
385 memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
386 sizeof(hfsplus_extent_rec));
387 for (count = 0, i = 0; i < 8; i++)
388 count += be32_to_cpu(fork->extents[i].block_count);
389 HFSPLUS_I(inode).first_blocks = count;
390 memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
391 HFSPLUS_I(inode).cached_start = 0;
392 HFSPLUS_I(inode).cached_blocks = 0;
393
394 HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
395 inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
396 HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
397 inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
398 HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
399 if (!HFSPLUS_I(inode).clump_blocks)
400 HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
401 HFSPLUS_SB(sb).data_clump_blocks;
402}
403
404void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
405{
406 memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
407 sizeof(hfsplus_extent_rec));
408 fork->total_size = cpu_to_be64(inode->i_size);
409 fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
410}
411
412int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
413{
414 hfsplus_cat_entry entry;
415 int res = 0;
416 u16 type;
417
418 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
419
420 HFSPLUS_I(inode).dev = 0;
421 inode->i_blksize = HFSPLUS_SB(inode->i_sb).alloc_blksz;
422 if (type == HFSPLUS_FOLDER) {
423 struct hfsplus_cat_folder *folder = &entry.folder;
424
425 if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
426 /* panic? */;
427 hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
428 sizeof(struct hfsplus_cat_folder));
429 hfsplus_get_perms(inode, &folder->permissions, 1);
430 inode->i_nlink = 1;
431 inode->i_size = 2 + be32_to_cpu(folder->valence);
432 inode->i_atime = hfsp_mt2ut(folder->access_date);
433 inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
9a4cad95
RZ
434 inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
435 HFSPLUS_I(inode).create_date = folder->create_date;
1da177e4
LT
436 HFSPLUS_I(inode).fs_blocks = 0;
437 inode->i_op = &hfsplus_dir_inode_operations;
438 inode->i_fop = &hfsplus_dir_operations;
439 } else if (type == HFSPLUS_FILE) {
440 struct hfsplus_cat_file *file = &entry.file;
441
442 if (fd->entrylength < sizeof(struct hfsplus_cat_file))
443 /* panic? */;
444 hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
445 sizeof(struct hfsplus_cat_file));
446
447 hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
448 &file->data_fork : &file->rsrc_fork);
449 hfsplus_get_perms(inode, &file->permissions, 0);
450 inode->i_nlink = 1;
451 if (S_ISREG(inode->i_mode)) {
452 if (file->permissions.dev)
453 inode->i_nlink = be32_to_cpu(file->permissions.dev);
454 inode->i_op = &hfsplus_file_inode_operations;
455 inode->i_fop = &hfsplus_file_operations;
456 inode->i_mapping->a_ops = &hfsplus_aops;
457 } else if (S_ISLNK(inode->i_mode)) {
458 inode->i_op = &page_symlink_inode_operations;
459 inode->i_mapping->a_ops = &hfsplus_aops;
460 } else {
461 init_special_inode(inode, inode->i_mode,
462 be32_to_cpu(file->permissions.dev));
463 }
464 inode->i_atime = hfsp_mt2ut(file->access_date);
465 inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
9a4cad95
RZ
466 inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
467 HFSPLUS_I(inode).create_date = file->create_date;
1da177e4 468 } else {
634725a9 469 printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
1da177e4
LT
470 res = -EIO;
471 }
472 return res;
473}
474
475int hfsplus_cat_write_inode(struct inode *inode)
476{
477 struct inode *main_inode = inode;
478 struct hfs_find_data fd;
479 hfsplus_cat_entry entry;
480
481 if (HFSPLUS_IS_RSRC(inode))
482 main_inode = HFSPLUS_I(inode).rsrc_inode;
483
484 if (!main_inode->i_nlink)
485 return 0;
486
487 if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
488 /* panic? */
489 return -EIO;
490
491 if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
492 /* panic? */
493 goto out;
494
495 if (S_ISDIR(main_inode->i_mode)) {
496 struct hfsplus_cat_folder *folder = &entry.folder;
497
498 if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
499 /* panic? */;
500 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
501 sizeof(struct hfsplus_cat_folder));
502 /* simple node checks? */
503 hfsplus_set_perms(inode, &folder->permissions);
504 folder->access_date = hfsp_ut2mt(inode->i_atime);
505 folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
506 folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
507 folder->valence = cpu_to_be32(inode->i_size - 2);
508 hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
509 sizeof(struct hfsplus_cat_folder));
510 } else if (HFSPLUS_IS_RSRC(inode)) {
511 struct hfsplus_cat_file *file = &entry.file;
512 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
513 sizeof(struct hfsplus_cat_file));
514 hfsplus_inode_write_fork(inode, &file->rsrc_fork);
515 hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
516 sizeof(struct hfsplus_cat_file));
517 } else {
518 struct hfsplus_cat_file *file = &entry.file;
519
520 if (fd.entrylength < sizeof(struct hfsplus_cat_file))
521 /* panic? */;
522 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
523 sizeof(struct hfsplus_cat_file));
524 hfsplus_inode_write_fork(inode, &file->data_fork);
525 if (S_ISREG(inode->i_mode))
526 HFSPLUS_I(inode).dev = inode->i_nlink;
527 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
528 HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
529 hfsplus_set_perms(inode, &file->permissions);
530 if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
531 file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
532 else
533 file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
534 file->access_date = hfsp_ut2mt(inode->i_atime);
535 file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
536 file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
537 hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
538 sizeof(struct hfsplus_cat_file));
539 }
540out:
541 hfs_find_exit(&fd);
542 return 0;
543}
This page took 0.198715 seconds and 5 git commands to generate.