/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * linux/fs/minix/inode.c
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/slab.h>

#define EXOFS_DBGMSG2(M...) do {} while (0)

enum { MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
unsigned exofs_max_io_pages(struct ore_layout *layout,
			    unsigned expected_pages)
{
	unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

	/* TODO: easily support bio chaining */
	pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
	return pages;
}
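/* A page_collect gathers a run of contiguous pages of one inode so they can
 * be submitted to the ORE as a single I/O. The collection state (first page
 * index, pages array, accumulated length) lives here between calls to the
 * strip functions below.
 */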
struct page_collect {
	struct exofs_sb_info *sbi;
	struct inode *inode;
	unsigned expected_pages;
	struct ore_io_state *ios;

	struct page **pages;
	unsigned alloc_pages;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first;	/* keep 64bit also in 32-arches */
	bool read_4_write;	/* This means two things: that the read is sync
				 * And the pages should not be unlocked.
				 */
};
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->read_4_write = false;
}
static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->pages = NULL;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;
	pcol->alloc_pages = 0;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = MAX_PAGES_KMALLOC;
}
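/* Allocate the pages array for a collection. If the full-size kmalloc fails,
 * keep halving the request: a smaller array only means the collection gets
 * split into more I/Os, it is not a hard failure.
 */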
static int pcol_try_alloc(struct page_collect *pcol)
{
	unsigned pages;

	/* TODO: easily support bio chaining */
	pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

	for (; pages; pages >>= 1) {
		pcol->pages = kmalloc(pages * sizeof(struct page *),
				      GFP_KERNEL);
		if (likely(pcol->pages)) {
			pcol->alloc_pages = pages;
			return 0;
		}
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}
static void pcol_free(struct page_collect *pcol)
{
	kfree(pcol->pages);
	pcol->pages = NULL;

	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}
static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
		return -ENOMEM;

	pcol->pages[pcol->nr_pages++] = page;
	pcol->length += len;
	return 0;
}
enum {PAGE_WAS_NOT_IN_IO = 17};
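/* PAGE_WAS_NOT_IN_IO is a sentinel status for pages that lie beyond the bytes
 * actually covered by a completed I/O: update_read_page() turns it into a
 * recovered (zero) status and update_write_page() leaves such a page alone.
 */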
static int update_read_page(struct page *page, int ret)
{
	switch (ret) {
	case 0:
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		break;
	case -EFAULT:
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		EXOFS_DBGMSG("recovered read error\n");
		/* fall through */
	case PAGE_WAS_NOT_IN_IO:
		ret = 0; /* recovered error */
		break;
	default:
		SetPageError(page);
	}
	return ret;
}
static void update_write_page(struct page *page, int ret)
{
	if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
		return; /* don't pass start don't collect $200 */

	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}
/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(pcol->ios, NULL);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (!pcol->read_4_write)
			unlock_page(page);
		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG2("readpages_done END\n");
	return ret;
}
/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	int i;

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}
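/* An ore_io_state may end up covering fewer pages than were collected (the
 * ORE is free to trim an I/O). When that happens the leftover pages are moved
 * from pcol_src into the fresh pcol so the caller's next submission picks
 * them up.
 */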
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
	struct page_collect *pcol_src, struct page_collect *pcol)
{
	/* length was wrong or offset was not page aligned */
	BUG_ON(pcol_src->nr_pages < ios->nr_pages);

	if (pcol_src->nr_pages > ios->nr_pages) {
		struct page **src_page;
		unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
		unsigned long len_less = pcol_src->length - ios->length;
		unsigned i;
		int ret;

		/* This IO was trimmed */
		pcol_src->nr_pages = ios->nr_pages;
		pcol_src->length = ios->length;

		/* Left over pages are passed to the next io */
		pcol->expected_pages += pages_less;
		pcol->nr_pages = pages_less;
		pcol->length = len_less;
		src_page = pcol_src->pages + pcol_src->nr_pages;
		pcol->pg_first = (*src_page)->index;

		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			return ret;

		for (i = 0; i < pages_less; ++i)
			pcol->pages[i] = *src_page++;

		EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
			"pages_less=0x%x expected_pages=0x%x "
			"next_offset=0x%llx next_len=0x%lx\n",
			pcol_src->nr_pages, pages_less, pcol->expected_pages,
			pcol->pg_first * PAGE_SIZE, pcol->length);
	}
	return 0;
}
static int read_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	if (!pcol->ios) {
		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
					   pcol->pg_first << PAGE_CACHE_SHIFT,
					   pcol->length, &pcol->ios);

		if (ret)
			return ret;
	}

	ios = pcol->ios;
	ios->pages = pcol->pages;

	if (pcol->read_4_write) {
		ore_read(pcol->ios);
		return __readpages_done(pcol);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_read(ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	if (!pcol->read_4_write)
		_unlock_pcol_pages(pcol, ret, READ);

	pcol_free(pcol);
	kfree(pcol_copy);
	return ret;
}
/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous
 * segment and will start a new collection. Eventually the caller must submit
 * the last segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		if (!pcol->read_4_write)
			unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
			     "read_4_write=%d index=0x%lx end_index=0x%lx "
			     "splitting\n", inode->i_ino, len,
			     pcol->read_4_write, page->index, end_index);

		return read_exec(pcol);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}
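/* ->readpages entry point: read_cache_pages() feeds every readahead page
 * through readpage_strip(), and whatever is still collected afterwards is
 * submitted from here.
 */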
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	ret = read_exec(&pcol);
	if (unlikely(ret))
		return ret;

	return read_exec(&pcol);
}
static int _readpage(struct page *page, bool read_4_write)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	pcol.read_4_write = read_4_write;
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol);
}

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}
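/* Note: _readpage(page, true) (read_4_write) is the synchronous flavour used
 * by exofs_write_begin() to bring a partially written page uptodate; in that
 * mode the page is left locked for the caller.
 */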
/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(ios, NULL);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG2("writepages_done END\n");
}
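/* Submit the collected dirty pages as one asynchronous ORE write. The
 * page_collect is duplicated into pcol_copy, which owns the pages from here
 * on and is freed in writepages_done().
 */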
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
			       pcol->pg_first << PAGE_CACHE_SHIFT,
			       pcol->length, &pcol->ios);
	if (unlikely(ret))
		goto err;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	ios = pcol->ios;
	ios->pages = pcol_copy->pages;
	ios->done = writepages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: ore_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	pcol_free(pcol);
	kfree(pcol_copy);

	return ret;
}
/* writepage_strip is called either directly from writepage() or by the VFS
 * from within write_cache_pages(), to add one more page to be written to
 * storage. It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually the caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page "
			      "nr_pages=%u total_length=0x%lx\n",
			      pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}
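/* ->writepages entry point. expected_pages is only a hint used to size the
 * first pages array; write_cache_pages() drives writepage_strip() for the
 * actual collection.
 */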
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
		     mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (unlikely(ret)) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	ret = write_exec(&pcol);
	if (unlikely(ret))
		return ret;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		return write_exec(&pcol); /* pump the last remainder */
	} else if (pcol.nr_pages) {
		/* not SYNC let the remainder join the next writeout */
		unsigned i;

		for (i = 0; i < pcol.nr_pages; i++) {
			struct page *page = pcol.pages[i];

			end_page_writeback(page);
			set_page_dirty(page);
			unlock_page(page);
		}
	}
	return 0;
}
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
/* i_mutex held using inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
	if (to > inode->i_size)
		truncate_pagecache(inode, to, inode->i_size);
}
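/* For a partial-page write the page must first be read in (read-modify-write)
 * unless it lies entirely beyond i_size, in which case it is simply zeroed.
 */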
int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			goto out;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		loff_t i_size = i_size_read(mapping->host);
		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
		size_t rlen;

		if (page->index < end_index)
			rlen = PAGE_CACHE_SIZE;
		else if (page->index == end_index)
			rlen = i_size & ~PAGE_CACHE_MASK;
		else
			rlen = 0;

		if (!rlen) {
			clear_highpage(page);
			SetPageUptodate(page);
			goto out;
		}

		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("__readpage failed\n");
		}
	}
out:
	if (unlikely(ret))
		_write_failed(mapping->host, pos + len);

	return ret;
}
static int exofs_write_begin_export(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}
static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret))
		_write_failed(inode, pos + len);

	/* TODO: once simple_write_end marks inode dirty remove */
	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}
static int exofs_releasepage(struct page *page, gfp_t gfp)
{
	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
}
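/* All data I/O of exofs goes through the collect/submit machinery above;
 * releasepage and invalidatepage are only debug stubs that log.
 */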
const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= exofs_writepage,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
	.releasepage	= exofs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.invalidatepage = exofs_invalidatepage,

	/* Not implemented Yet */
	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
	.direct_IO	= NULL, /* TODO: Should be trivial to do */

	/* With these NULL has special meaning or default is not exported */
	.launder_page		= NULL,
	.is_partially_uptodate	= NULL,
	.error_remove_page	= NULL,
};

/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/
/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
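/* A fast symlink keeps its target inline in the inode's i_data area instead
 * of in the object's data, hence the i_data[0] != 0 test above.
 */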
static int _do_truncate(struct inode *inode, loff_t newsize)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
	if (likely(!ret))
		truncate_setsize(inode, newsize);

	EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
		     inode->i_ino, newsize, ret);
	return ret;
}
/*
 * Set inode attributes - update size attribute on OSD if needed,
 *                        otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* if we are about to modify an object, and it hasn't been
	 * created yet, wait
	 */
	error = wait_obj_created(exofs_i(inode));
	if (unlikely(error))
		return error;

	error = inode_change_ok(inode, iattr);
	if (unlikely(error))
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = _do_truncate(inode, iattr->ia_size);
		if (unlikely(error))
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_FILE_LAYOUT,
	0);

static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_DIR_LAYOUT,
	0);
/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
		    struct exofs_fcb *inode)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[] = {
		[0] = g_attr_inode_data,
		[1] = g_attr_inode_file_layout,
		[2] = g_attr_inode_dir_layout,
	};
	struct ore_io_state *ios;
	struct exofs_on_disk_inode_layout *layout;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
	attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
			  _LLU(oi->one_comp.obj.id), ret);
		memset(inode, 0, sizeof(*inode));
		inode->i_mode = 0040000 | (0777 & ~022);
		/* If object is lost on target we might as well enable its
		 * delete.
		 */
		if ((ret == -ENOENT) || (ret == -EINVAL))
			ret = 0;
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[1].len) {
		layout = attrs[1].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported files layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

	ret = extract_attr_from_ios(ios, &attrs[2]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[2].len) {
		layout = attrs[2].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported meta-data layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

out:
	ore_put_io_state(ios);
	return ret;
}
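/* i_wq is the wait queue used by wait_obj_created(); create_done() wakes it
 * up once the OSD object exists.
 */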
static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
}
/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);
	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	inode->i_nlink = le16_to_cpu(fcb.i_links_count);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = ore_check_io(ios, NULL);
	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)),
			  _LLU(oi->one_comp.obj.partition));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
	}

	set_obj_created(oi);

	wake_up(&oi->i_wq);
}
/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct ore_io_state *ios;
	int ret;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	inode_init_owner(inode, dir, mode);
	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));
	exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

	mark_inode_dirty(inode);

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->done = create_done;
	ios->private = inode;

	ret = ore_create(ios);
	if (ret) {
		ore_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}
/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	ore_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}
/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		EXOFS_DBGMSG("Failed kzalloc of args\n");
		return -ENOMEM;
	}

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	wait_obj_created(oi);

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = ore_write(ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	ore_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
		     inode->i_ino, do_sync, ret);
	return ret;
}
int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
	return exofs_update_inode(inode, 1);
}
/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up
 * to do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}
/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	/* TODO: should do better here */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	end_writeback(inode);

	/* if we are deleting an obj that hasn't been created yet, wait.
	 * This also makes sure that create_done cannot be called with an
	 * already evicted inode.
	 */
	wait_obj_created(oi);
	/* ignore the error, attempt a remove anyway */

	/* Now Remove the OSD objects */
	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
		return;
	}

	ios->done = delete_done;
	ios->private = sbi;

	ret = ore_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: ore_remove failed\n", __func__);
		ore_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	end_writeback(inode);
}