/*
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */
15 #include <linux/aio.h>
/* Slow-path lookup of an extension block; defined further below. */
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
21 affs_file_open(struct inode
*inode
, struct file
*filp
)
23 pr_debug("open(%lu,%d)\n",
24 inode
->i_ino
, atomic_read(&AFFS_I(inode
)->i_opencnt
));
25 atomic_inc(&AFFS_I(inode
)->i_opencnt
);
30 affs_file_release(struct inode
*inode
, struct file
*filp
)
32 pr_debug("release(%lu, %d)\n",
33 inode
->i_ino
, atomic_read(&AFFS_I(inode
)->i_opencnt
));
35 if (atomic_dec_and_test(&AFFS_I(inode
)->i_opencnt
)) {
36 mutex_lock(&inode
->i_mutex
);
37 if (inode
->i_size
!= AFFS_I(inode
)->mmu_private
)
39 affs_free_prealloc(inode
);
40 mutex_unlock(&inode
->i_mutex
);
47 affs_grow_extcache(struct inode
*inode
, u32 lc_idx
)
49 struct super_block
*sb
= inode
->i_sb
;
50 struct buffer_head
*bh
;
54 if (!AFFS_I(inode
)->i_lc
) {
55 char *ptr
= (char *)get_zeroed_page(GFP_NOFS
);
58 AFFS_I(inode
)->i_lc
= (u32
*)ptr
;
59 AFFS_I(inode
)->i_ac
= (struct affs_ext_key
*)(ptr
+ AFFS_CACHE_SIZE
/ 2);
62 lc_max
= AFFS_LC_SIZE
<< AFFS_I(inode
)->i_lc_shift
;
64 if (AFFS_I(inode
)->i_extcnt
> lc_max
) {
65 u32 lc_shift
, lc_mask
, tmp
, off
;
67 /* need to recalculate linear cache, start from old size */
68 lc_shift
= AFFS_I(inode
)->i_lc_shift
;
69 tmp
= (AFFS_I(inode
)->i_extcnt
/ AFFS_LC_SIZE
) >> lc_shift
;
70 for (; tmp
; tmp
>>= 1)
72 lc_mask
= (1 << lc_shift
) - 1;
74 /* fix idx and old size to new shift */
75 lc_idx
>>= (lc_shift
- AFFS_I(inode
)->i_lc_shift
);
76 AFFS_I(inode
)->i_lc_size
>>= (lc_shift
- AFFS_I(inode
)->i_lc_shift
);
78 /* first shrink old cache to make more space */
79 off
= 1 << (lc_shift
- AFFS_I(inode
)->i_lc_shift
);
80 for (i
= 1, j
= off
; j
< AFFS_LC_SIZE
; i
++, j
+= off
)
81 AFFS_I(inode
)->i_ac
[i
] = AFFS_I(inode
)->i_ac
[j
];
83 AFFS_I(inode
)->i_lc_shift
= lc_shift
;
84 AFFS_I(inode
)->i_lc_mask
= lc_mask
;
87 /* fill cache to the needed index */
88 i
= AFFS_I(inode
)->i_lc_size
;
89 AFFS_I(inode
)->i_lc_size
= lc_idx
+ 1;
90 for (; i
<= lc_idx
; i
++) {
92 AFFS_I(inode
)->i_lc
[0] = inode
->i_ino
;
95 key
= AFFS_I(inode
)->i_lc
[i
- 1];
96 j
= AFFS_I(inode
)->i_lc_mask
+ 1;
99 bh
= affs_bread(sb
, key
);
102 key
= be32_to_cpu(AFFS_TAIL(sb
, bh
)->extension
);
106 AFFS_I(inode
)->i_lc
[i
] = key
;
116 static struct buffer_head
*
117 affs_alloc_extblock(struct inode
*inode
, struct buffer_head
*bh
, u32 ext
)
119 struct super_block
*sb
= inode
->i_sb
;
120 struct buffer_head
*new_bh
;
123 blocknr
= affs_alloc_block(inode
, bh
->b_blocknr
);
125 return ERR_PTR(-ENOSPC
);
127 new_bh
= affs_getzeroblk(sb
, blocknr
);
129 affs_free_block(sb
, blocknr
);
130 return ERR_PTR(-EIO
);
133 AFFS_HEAD(new_bh
)->ptype
= cpu_to_be32(T_LIST
);
134 AFFS_HEAD(new_bh
)->key
= cpu_to_be32(blocknr
);
135 AFFS_TAIL(sb
, new_bh
)->stype
= cpu_to_be32(ST_FILE
);
136 AFFS_TAIL(sb
, new_bh
)->parent
= cpu_to_be32(inode
->i_ino
);
137 affs_fix_checksum(sb
, new_bh
);
139 mark_buffer_dirty_inode(new_bh
, inode
);
141 tmp
= be32_to_cpu(AFFS_TAIL(sb
, bh
)->extension
);
143 affs_warning(sb
, "alloc_ext", "previous extension set (%x)", tmp
);
144 AFFS_TAIL(sb
, bh
)->extension
= cpu_to_be32(blocknr
);
145 affs_adjust_checksum(bh
, blocknr
- tmp
);
146 mark_buffer_dirty_inode(bh
, inode
);
148 AFFS_I(inode
)->i_extcnt
++;
149 mark_inode_dirty(inode
);
154 static inline struct buffer_head
*
155 affs_get_extblock(struct inode
*inode
, u32 ext
)
157 /* inline the simplest case: same extended block as last time */
158 struct buffer_head
*bh
= AFFS_I(inode
)->i_ext_bh
;
159 if (ext
== AFFS_I(inode
)->i_ext_last
)
162 /* we have to do more (not inlined) */
163 bh
= affs_get_extblock_slow(inode
, ext
);
168 static struct buffer_head
*
169 affs_get_extblock_slow(struct inode
*inode
, u32 ext
)
171 struct super_block
*sb
= inode
->i_sb
;
172 struct buffer_head
*bh
;
174 u32 lc_idx
, lc_off
, ac_idx
;
177 if (ext
== AFFS_I(inode
)->i_ext_last
+ 1) {
178 /* read the next extended block from the current one */
179 bh
= AFFS_I(inode
)->i_ext_bh
;
180 ext_key
= be32_to_cpu(AFFS_TAIL(sb
, bh
)->extension
);
181 if (ext
< AFFS_I(inode
)->i_extcnt
)
183 BUG_ON(ext
> AFFS_I(inode
)->i_extcnt
);
184 bh
= affs_alloc_extblock(inode
, bh
, ext
);
191 /* we seek back to the file header block */
192 ext_key
= inode
->i_ino
;
196 if (ext
>= AFFS_I(inode
)->i_extcnt
) {
197 struct buffer_head
*prev_bh
;
199 /* allocate a new extended block */
200 BUG_ON(ext
> AFFS_I(inode
)->i_extcnt
);
202 /* get previous extended block */
203 prev_bh
= affs_get_extblock(inode
, ext
- 1);
206 bh
= affs_alloc_extblock(inode
, prev_bh
, ext
);
207 affs_brelse(prev_bh
);
214 /* check if there is an extended cache and whether it's large enough */
215 lc_idx
= ext
>> AFFS_I(inode
)->i_lc_shift
;
216 lc_off
= ext
& AFFS_I(inode
)->i_lc_mask
;
218 if (lc_idx
>= AFFS_I(inode
)->i_lc_size
) {
221 err
= affs_grow_extcache(inode
, lc_idx
);
227 /* every n'th key we find in the linear cache */
229 ext_key
= AFFS_I(inode
)->i_lc
[lc_idx
];
233 /* maybe it's still in the associative cache */
234 ac_idx
= (ext
- lc_idx
- 1) & AFFS_AC_MASK
;
235 if (AFFS_I(inode
)->i_ac
[ac_idx
].ext
== ext
) {
236 ext_key
= AFFS_I(inode
)->i_ac
[ac_idx
].key
;
240 /* try to find one of the previous extended blocks */
243 while (--tmp
, --lc_off
> 0) {
244 idx
= (idx
- 1) & AFFS_AC_MASK
;
245 if (AFFS_I(inode
)->i_ac
[idx
].ext
== tmp
) {
246 ext_key
= AFFS_I(inode
)->i_ac
[idx
].key
;
251 /* fall back to the linear cache */
252 ext_key
= AFFS_I(inode
)->i_lc
[lc_idx
];
254 /* read all extended blocks until we find the one we need */
257 bh
= affs_bread(sb
, ext_key
);
260 ext_key
= be32_to_cpu(AFFS_TAIL(sb
, bh
)->extension
);
266 /* store it in the associative cache */
267 // recalculate ac_idx?
268 AFFS_I(inode
)->i_ac
[ac_idx
].ext
= ext
;
269 AFFS_I(inode
)->i_ac
[ac_idx
].key
= ext_key
;
272 /* finally read the right extended block */
274 bh
= affs_bread(sb
, ext_key
);
280 /* release old cached extended block and store the new one */
281 affs_brelse(AFFS_I(inode
)->i_ext_bh
);
282 AFFS_I(inode
)->i_ext_last
= ext
;
283 AFFS_I(inode
)->i_ext_bh
= bh
;
290 return ERR_PTR(-EIO
);
294 affs_get_block(struct inode
*inode
, sector_t block
, struct buffer_head
*bh_result
, int create
)
296 struct super_block
*sb
= inode
->i_sb
;
297 struct buffer_head
*ext_bh
;
300 pr_debug("%s(%lu, %llu)\n", __func__
, inode
->i_ino
,
301 (unsigned long long)block
);
303 BUG_ON(block
> (sector_t
)0x7fffffffUL
);
305 if (block
>= AFFS_I(inode
)->i_blkcnt
) {
306 if (block
> AFFS_I(inode
)->i_blkcnt
|| !create
)
312 affs_lock_ext(inode
);
314 ext
= (u32
)block
/ AFFS_SB(sb
)->s_hashsize
;
315 block
-= ext
* AFFS_SB(sb
)->s_hashsize
;
316 ext_bh
= affs_get_extblock(inode
, ext
);
319 map_bh(bh_result
, sb
, (sector_t
)be32_to_cpu(AFFS_BLOCK(sb
, ext_bh
, block
)));
322 u32 blocknr
= affs_alloc_block(inode
, ext_bh
->b_blocknr
);
325 set_buffer_new(bh_result
);
326 AFFS_I(inode
)->mmu_private
+= AFFS_SB(sb
)->s_data_blksize
;
327 AFFS_I(inode
)->i_blkcnt
++;
329 /* store new block */
330 if (bh_result
->b_blocknr
)
331 affs_warning(sb
, "get_block",
332 "block already set (%llx)",
333 (unsigned long long)bh_result
->b_blocknr
);
334 AFFS_BLOCK(sb
, ext_bh
, block
) = cpu_to_be32(blocknr
);
335 AFFS_HEAD(ext_bh
)->block_count
= cpu_to_be32(block
+ 1);
336 affs_adjust_checksum(ext_bh
, blocknr
- bh_result
->b_blocknr
+ 1);
337 bh_result
->b_blocknr
= blocknr
;
340 /* insert first block into header block */
341 u32 tmp
= be32_to_cpu(AFFS_HEAD(ext_bh
)->first_data
);
343 affs_warning(sb
, "get_block", "first block already set (%d)", tmp
);
344 AFFS_HEAD(ext_bh
)->first_data
= cpu_to_be32(blocknr
);
345 affs_adjust_checksum(ext_bh
, blocknr
- tmp
);
351 affs_unlock_ext(inode
);
355 affs_error(inode
->i_sb
, "get_block", "strange block request %llu",
356 (unsigned long long)block
);
360 affs_unlock_ext(inode
);
361 return PTR_ERR(ext_bh
);
364 clear_buffer_mapped(bh_result
);
365 bh_result
->b_bdev
= NULL
;
367 affs_unlock_ext(inode
);
371 static int affs_writepage(struct page
*page
, struct writeback_control
*wbc
)
373 return block_write_full_page(page
, affs_get_block
, wbc
);
376 static int affs_readpage(struct file
*file
, struct page
*page
)
378 return block_read_full_page(page
, affs_get_block
);
381 static void affs_write_failed(struct address_space
*mapping
, loff_t to
)
383 struct inode
*inode
= mapping
->host
;
385 if (to
> inode
->i_size
) {
386 truncate_pagecache(inode
, inode
->i_size
);
387 affs_truncate(inode
);
392 affs_direct_IO(int rw
, struct kiocb
*iocb
, struct iov_iter
*iter
,
395 struct file
*file
= iocb
->ki_filp
;
396 struct address_space
*mapping
= file
->f_mapping
;
397 struct inode
*inode
= mapping
->host
;
398 size_t count
= iov_iter_count(iter
);
402 loff_t size
= offset
+ count
;
404 if (AFFS_I(inode
)->mmu_private
< size
)
408 ret
= blockdev_direct_IO(rw
, iocb
, inode
, iter
, offset
, affs_get_block
);
409 if (ret
< 0 && (rw
& WRITE
))
410 affs_write_failed(mapping
, offset
+ count
);
414 static int affs_write_begin(struct file
*file
, struct address_space
*mapping
,
415 loff_t pos
, unsigned len
, unsigned flags
,
416 struct page
**pagep
, void **fsdata
)
421 ret
= cont_write_begin(file
, mapping
, pos
, len
, flags
, pagep
, fsdata
,
423 &AFFS_I(mapping
->host
)->mmu_private
);
425 affs_write_failed(mapping
, pos
+ len
);
430 static sector_t
_affs_bmap(struct address_space
*mapping
, sector_t block
)
432 return generic_block_bmap(mapping
,block
,affs_get_block
);
435 const struct address_space_operations affs_aops
= {
436 .readpage
= affs_readpage
,
437 .writepage
= affs_writepage
,
438 .write_begin
= affs_write_begin
,
439 .write_end
= generic_write_end
,
440 .direct_IO
= affs_direct_IO
,
444 static inline struct buffer_head
*
445 affs_bread_ino(struct inode
*inode
, int block
, int create
)
447 struct buffer_head
*bh
, tmp_bh
;
451 err
= affs_get_block(inode
, block
, &tmp_bh
, create
);
453 bh
= affs_bread(inode
->i_sb
, tmp_bh
.b_blocknr
);
455 bh
->b_state
|= tmp_bh
.b_state
;
463 static inline struct buffer_head
*
464 affs_getzeroblk_ino(struct inode
*inode
, int block
)
466 struct buffer_head
*bh
, tmp_bh
;
470 err
= affs_get_block(inode
, block
, &tmp_bh
, 1);
472 bh
= affs_getzeroblk(inode
->i_sb
, tmp_bh
.b_blocknr
);
474 bh
->b_state
|= tmp_bh
.b_state
;
482 static inline struct buffer_head
*
483 affs_getemptyblk_ino(struct inode
*inode
, int block
)
485 struct buffer_head
*bh
, tmp_bh
;
489 err
= affs_get_block(inode
, block
, &tmp_bh
, 1);
491 bh
= affs_getemptyblk(inode
->i_sb
, tmp_bh
.b_blocknr
);
493 bh
->b_state
|= tmp_bh
.b_state
;
502 affs_do_readpage_ofs(struct page
*page
, unsigned to
)
504 struct inode
*inode
= page
->mapping
->host
;
505 struct super_block
*sb
= inode
->i_sb
;
506 struct buffer_head
*bh
;
509 u32 bidx
, boff
, bsize
;
512 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__
, inode
->i_ino
,
514 BUG_ON(to
> PAGE_CACHE_SIZE
);
516 data
= page_address(page
);
517 bsize
= AFFS_SB(sb
)->s_data_blksize
;
518 tmp
= page
->index
<< PAGE_CACHE_SHIFT
;
523 bh
= affs_bread_ino(inode
, bidx
, 0);
526 tmp
= min(bsize
- boff
, to
- pos
);
527 BUG_ON(pos
+ tmp
> to
|| tmp
> bsize
);
528 memcpy(data
+ pos
, AFFS_DATA(bh
) + boff
, tmp
);
534 flush_dcache_page(page
);
540 affs_extent_file_ofs(struct inode
*inode
, u32 newsize
)
542 struct super_block
*sb
= inode
->i_sb
;
543 struct buffer_head
*bh
, *prev_bh
;
548 pr_debug("%s(%lu, %d)\n", __func__
, inode
->i_ino
, newsize
);
549 bsize
= AFFS_SB(sb
)->s_data_blksize
;
551 size
= AFFS_I(inode
)->mmu_private
;
555 bh
= affs_bread_ino(inode
, bidx
, 0);
558 tmp
= min(bsize
- boff
, newsize
- size
);
559 BUG_ON(boff
+ tmp
> bsize
|| tmp
> bsize
);
560 memset(AFFS_DATA(bh
) + boff
, 0, tmp
);
561 be32_add_cpu(&AFFS_DATA_HEAD(bh
)->size
, tmp
);
562 affs_fix_checksum(sb
, bh
);
563 mark_buffer_dirty_inode(bh
, inode
);
567 bh
= affs_bread_ino(inode
, bidx
- 1, 0);
572 while (size
< newsize
) {
574 bh
= affs_getzeroblk_ino(inode
, bidx
);
577 tmp
= min(bsize
, newsize
- size
);
579 AFFS_DATA_HEAD(bh
)->ptype
= cpu_to_be32(T_DATA
);
580 AFFS_DATA_HEAD(bh
)->key
= cpu_to_be32(inode
->i_ino
);
581 AFFS_DATA_HEAD(bh
)->sequence
= cpu_to_be32(bidx
);
582 AFFS_DATA_HEAD(bh
)->size
= cpu_to_be32(tmp
);
583 affs_fix_checksum(sb
, bh
);
584 bh
->b_state
&= ~(1UL << BH_New
);
585 mark_buffer_dirty_inode(bh
, inode
);
587 u32 tmp_next
= be32_to_cpu(AFFS_DATA_HEAD(prev_bh
)->next
);
590 affs_warning(sb
, "extent_file_ofs",
591 "next block already set for %d (%d)",
593 AFFS_DATA_HEAD(prev_bh
)->next
= cpu_to_be32(bh
->b_blocknr
);
594 affs_adjust_checksum(prev_bh
, bh
->b_blocknr
- tmp_next
);
595 mark_buffer_dirty_inode(prev_bh
, inode
);
596 affs_brelse(prev_bh
);
602 inode
->i_size
= AFFS_I(inode
)->mmu_private
= newsize
;
606 inode
->i_size
= AFFS_I(inode
)->mmu_private
= newsize
;
611 affs_readpage_ofs(struct file
*file
, struct page
*page
)
613 struct inode
*inode
= page
->mapping
->host
;
617 pr_debug("%s(%lu, %ld)\n", __func__
, inode
->i_ino
, page
->index
);
618 to
= PAGE_CACHE_SIZE
;
619 if (((page
->index
+ 1) << PAGE_CACHE_SHIFT
) > inode
->i_size
) {
620 to
= inode
->i_size
& ~PAGE_CACHE_MASK
;
621 memset(page_address(page
) + to
, 0, PAGE_CACHE_SIZE
- to
);
624 err
= affs_do_readpage_ofs(page
, to
);
626 SetPageUptodate(page
);
631 static int affs_write_begin_ofs(struct file
*file
, struct address_space
*mapping
,
632 loff_t pos
, unsigned len
, unsigned flags
,
633 struct page
**pagep
, void **fsdata
)
635 struct inode
*inode
= mapping
->host
;
640 pr_debug("%s(%lu, %llu, %llu)\n", __func__
, inode
->i_ino
, pos
,
642 if (pos
> AFFS_I(inode
)->mmu_private
) {
643 /* XXX: this probably leaves a too-big i_size in case of
644 * failure. Should really be updating i_size at write_end time
646 err
= affs_extent_file_ofs(inode
, pos
);
651 index
= pos
>> PAGE_CACHE_SHIFT
;
652 page
= grab_cache_page_write_begin(mapping
, index
, flags
);
657 if (PageUptodate(page
))
660 /* XXX: inefficient but safe in the face of short writes */
661 err
= affs_do_readpage_ofs(page
, PAGE_CACHE_SIZE
);
664 page_cache_release(page
);
669 static int affs_write_end_ofs(struct file
*file
, struct address_space
*mapping
,
670 loff_t pos
, unsigned len
, unsigned copied
,
671 struct page
*page
, void *fsdata
)
673 struct inode
*inode
= mapping
->host
;
674 struct super_block
*sb
= inode
->i_sb
;
675 struct buffer_head
*bh
, *prev_bh
;
677 u32 bidx
, boff
, bsize
;
682 from
= pos
& (PAGE_CACHE_SIZE
- 1);
685 * XXX: not sure if this can handle short copies (len < copied), but
686 * we don't have to, because the page should always be uptodate here,
687 * due to write_begin.
690 pr_debug("%s(%lu, %llu, %llu)\n", __func__
, inode
->i_ino
, pos
,
692 bsize
= AFFS_SB(sb
)->s_data_blksize
;
693 data
= page_address(page
);
697 tmp
= (page
->index
<< PAGE_CACHE_SHIFT
) + from
;
701 bh
= affs_bread_ino(inode
, bidx
, 0);
703 written
= PTR_ERR(bh
);
706 tmp
= min(bsize
- boff
, to
- from
);
707 BUG_ON(boff
+ tmp
> bsize
|| tmp
> bsize
);
708 memcpy(AFFS_DATA(bh
) + boff
, data
+ from
, tmp
);
709 be32_add_cpu(&AFFS_DATA_HEAD(bh
)->size
, tmp
);
710 affs_fix_checksum(sb
, bh
);
711 mark_buffer_dirty_inode(bh
, inode
);
716 bh
= affs_bread_ino(inode
, bidx
- 1, 0);
718 written
= PTR_ERR(bh
);
722 while (from
+ bsize
<= to
) {
724 bh
= affs_getemptyblk_ino(inode
, bidx
);
727 memcpy(AFFS_DATA(bh
), data
+ from
, bsize
);
728 if (buffer_new(bh
)) {
729 AFFS_DATA_HEAD(bh
)->ptype
= cpu_to_be32(T_DATA
);
730 AFFS_DATA_HEAD(bh
)->key
= cpu_to_be32(inode
->i_ino
);
731 AFFS_DATA_HEAD(bh
)->sequence
= cpu_to_be32(bidx
);
732 AFFS_DATA_HEAD(bh
)->size
= cpu_to_be32(bsize
);
733 AFFS_DATA_HEAD(bh
)->next
= 0;
734 bh
->b_state
&= ~(1UL << BH_New
);
736 u32 tmp_next
= be32_to_cpu(AFFS_DATA_HEAD(prev_bh
)->next
);
739 affs_warning(sb
, "commit_write_ofs",
740 "next block already set for %d (%d)",
742 AFFS_DATA_HEAD(prev_bh
)->next
= cpu_to_be32(bh
->b_blocknr
);
743 affs_adjust_checksum(prev_bh
, bh
->b_blocknr
- tmp_next
);
744 mark_buffer_dirty_inode(prev_bh
, inode
);
747 affs_brelse(prev_bh
);
748 affs_fix_checksum(sb
, bh
);
749 mark_buffer_dirty_inode(bh
, inode
);
756 bh
= affs_bread_ino(inode
, bidx
, 1);
759 tmp
= min(bsize
, to
- from
);
761 memcpy(AFFS_DATA(bh
), data
+ from
, tmp
);
762 if (buffer_new(bh
)) {
763 AFFS_DATA_HEAD(bh
)->ptype
= cpu_to_be32(T_DATA
);
764 AFFS_DATA_HEAD(bh
)->key
= cpu_to_be32(inode
->i_ino
);
765 AFFS_DATA_HEAD(bh
)->sequence
= cpu_to_be32(bidx
);
766 AFFS_DATA_HEAD(bh
)->size
= cpu_to_be32(tmp
);
767 AFFS_DATA_HEAD(bh
)->next
= 0;
768 bh
->b_state
&= ~(1UL << BH_New
);
770 u32 tmp_next
= be32_to_cpu(AFFS_DATA_HEAD(prev_bh
)->next
);
773 affs_warning(sb
, "commit_write_ofs",
774 "next block already set for %d (%d)",
776 AFFS_DATA_HEAD(prev_bh
)->next
= cpu_to_be32(bh
->b_blocknr
);
777 affs_adjust_checksum(prev_bh
, bh
->b_blocknr
- tmp_next
);
778 mark_buffer_dirty_inode(prev_bh
, inode
);
780 } else if (be32_to_cpu(AFFS_DATA_HEAD(bh
)->size
) < tmp
)
781 AFFS_DATA_HEAD(bh
)->size
= cpu_to_be32(tmp
);
782 affs_brelse(prev_bh
);
783 affs_fix_checksum(sb
, bh
);
784 mark_buffer_dirty_inode(bh
, inode
);
789 SetPageUptodate(page
);
793 tmp
= (page
->index
<< PAGE_CACHE_SHIFT
) + from
;
794 if (tmp
> inode
->i_size
)
795 inode
->i_size
= AFFS_I(inode
)->mmu_private
= tmp
;
799 page_cache_release(page
);
806 written
= PTR_ERR(bh
);
810 const struct address_space_operations affs_aops_ofs
= {
811 .readpage
= affs_readpage_ofs
,
812 //.writepage = affs_writepage_ofs,
813 .write_begin
= affs_write_begin_ofs
,
814 .write_end
= affs_write_end_ofs
817 /* Free any preallocated blocks. */
820 affs_free_prealloc(struct inode
*inode
)
822 struct super_block
*sb
= inode
->i_sb
;
824 pr_debug("free_prealloc(ino=%lu)\n", inode
->i_ino
);
826 while (AFFS_I(inode
)->i_pa_cnt
) {
827 AFFS_I(inode
)->i_pa_cnt
--;
828 affs_free_block(sb
, ++AFFS_I(inode
)->i_lastalloc
);
832 /* Truncate (or enlarge) a file to the requested size. */
835 affs_truncate(struct inode
*inode
)
837 struct super_block
*sb
= inode
->i_sb
;
839 u32 last_blk
, blkcnt
, blk
;
841 struct buffer_head
*ext_bh
;
844 pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
845 inode
->i_ino
, AFFS_I(inode
)->mmu_private
, inode
->i_size
);
850 last_blk
= ((u32
)inode
->i_size
- 1) / AFFS_SB(sb
)->s_data_blksize
;
851 ext
= last_blk
/ AFFS_SB(sb
)->s_hashsize
;
854 if (inode
->i_size
> AFFS_I(inode
)->mmu_private
) {
855 struct address_space
*mapping
= inode
->i_mapping
;
858 loff_t isize
= inode
->i_size
;
861 res
= mapping
->a_ops
->write_begin(NULL
, mapping
, isize
, 0, 0, &page
, &fsdata
);
863 res
= mapping
->a_ops
->write_end(NULL
, mapping
, isize
, 0, 0, page
, fsdata
);
865 inode
->i_size
= AFFS_I(inode
)->mmu_private
;
866 mark_inode_dirty(inode
);
868 } else if (inode
->i_size
== AFFS_I(inode
)->mmu_private
)
872 ext_bh
= affs_get_extblock(inode
, ext
);
873 if (IS_ERR(ext_bh
)) {
874 affs_warning(sb
, "truncate",
875 "unexpected read error for ext block %u (%ld)",
876 ext
, PTR_ERR(ext_bh
));
879 if (AFFS_I(inode
)->i_lc
) {
880 /* clear linear cache */
881 i
= (ext
+ 1) >> AFFS_I(inode
)->i_lc_shift
;
882 if (AFFS_I(inode
)->i_lc_size
> i
) {
883 AFFS_I(inode
)->i_lc_size
= i
;
884 for (; i
< AFFS_LC_SIZE
; i
++)
885 AFFS_I(inode
)->i_lc
[i
] = 0;
887 /* clear associative cache */
888 for (i
= 0; i
< AFFS_AC_SIZE
; i
++)
889 if (AFFS_I(inode
)->i_ac
[i
].ext
>= ext
)
890 AFFS_I(inode
)->i_ac
[i
].ext
= 0;
892 ext_key
= be32_to_cpu(AFFS_TAIL(sb
, ext_bh
)->extension
);
894 blkcnt
= AFFS_I(inode
)->i_blkcnt
;
898 i
= last_blk
% AFFS_SB(sb
)->s_hashsize
+ 1;
901 AFFS_HEAD(ext_bh
)->first_data
= 0;
902 AFFS_HEAD(ext_bh
)->block_count
= cpu_to_be32(i
);
903 size
= AFFS_SB(sb
)->s_hashsize
;
904 if (size
> blkcnt
- blk
+ i
)
905 size
= blkcnt
- blk
+ i
;
906 for (; i
< size
; i
++, blk
++) {
907 affs_free_block(sb
, be32_to_cpu(AFFS_BLOCK(sb
, ext_bh
, i
)));
908 AFFS_BLOCK(sb
, ext_bh
, i
) = 0;
910 AFFS_TAIL(sb
, ext_bh
)->extension
= 0;
911 affs_fix_checksum(sb
, ext_bh
);
912 mark_buffer_dirty_inode(ext_bh
, inode
);
916 AFFS_I(inode
)->i_blkcnt
= last_blk
+ 1;
917 AFFS_I(inode
)->i_extcnt
= ext
+ 1;
918 if (AFFS_SB(sb
)->s_flags
& SF_OFS
) {
919 struct buffer_head
*bh
= affs_bread_ino(inode
, last_blk
, 0);
922 affs_warning(sb
, "truncate",
923 "unexpected read error for last block %u (%ld)",
927 tmp
= be32_to_cpu(AFFS_DATA_HEAD(bh
)->next
);
928 AFFS_DATA_HEAD(bh
)->next
= 0;
929 affs_adjust_checksum(bh
, -tmp
);
933 AFFS_I(inode
)->i_blkcnt
= 0;
934 AFFS_I(inode
)->i_extcnt
= 1;
936 AFFS_I(inode
)->mmu_private
= inode
->i_size
;
940 ext_bh
= affs_bread(sb
, ext_key
);
941 size
= AFFS_SB(sb
)->s_hashsize
;
942 if (size
> blkcnt
- blk
)
944 for (i
= 0; i
< size
; i
++, blk
++)
945 affs_free_block(sb
, be32_to_cpu(AFFS_BLOCK(sb
, ext_bh
, i
)));
946 affs_free_block(sb
, ext_key
);
947 ext_key
= be32_to_cpu(AFFS_TAIL(sb
, ext_bh
)->extension
);
950 affs_free_prealloc(inode
);
953 int affs_file_fsync(struct file
*filp
, loff_t start
, loff_t end
, int datasync
)
955 struct inode
*inode
= filp
->f_mapping
->host
;
958 err
= filemap_write_and_wait_range(inode
->i_mapping
, start
, end
);
962 mutex_lock(&inode
->i_mutex
);
963 ret
= write_inode_now(inode
, 0);
964 err
= sync_blockdev(inode
->i_sb
->s_bdev
);
967 mutex_unlock(&inode
->i_mutex
);
970 const struct file_operations affs_file_operations
= {
971 .llseek
= generic_file_llseek
,
972 .read
= new_sync_read
,
973 .read_iter
= generic_file_read_iter
,
974 .write
= new_sync_write
,
975 .write_iter
= generic_file_write_iter
,
976 .mmap
= generic_file_mmap
,
977 .open
= affs_file_open
,
978 .release
= affs_file_release
,
979 .fsync
= affs_file_fsync
,
980 .splice_read
= generic_file_splice_read
,
983 const struct inode_operations affs_file_inode_operations
= {
984 .setattr
= affs_notify_change
,