#include "ceph_debug.h"

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "osd_client.h"

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped), where we write out the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
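
/*
 * A sketch of the accounting described above: with no snapshots and
 * three dirty pages, i_wrbuffer_ref == i_wrbuffer_ref_head == 3.  If
 * a snapshot is then taken, the 3 moves to capsnap->dirty and
 * i_wrbuffer_ref_head drops to 0; dirtying one more page leaves
 * i_wrbuffer_ref == 4 and i_wrbuffer_ref_head == 1, with the capsnap
 * still accounting for the 3 older pages until they are written back
 * in snap order.
 */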

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	int undo = 0;
	struct ceph_snap_context *snapc;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/*
	 * Note that we're grabbing a snapc ref here without holding
	 * any locks!
	 */
	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);

	/* dirty the head */
	spin_lock(&inode->i_lock);
	if (ci->i_wrbuffer_ref_head == 0)
		ci->i_head_snapc = ceph_get_snap_context(snapc);
	++ci->i_wrbuffer_ref_head;
	if (ci->i_wrbuffer_ref == 0)
		igrab(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&inode->i_lock);

	/* now adjust page */
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(!PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
				       BDI_RECLAIMABLE);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				   page_index(page), PAGECACHE_TAG_DIRTY);

		/*
		 * Reference snap context in page->private.  Also set
		 * PagePrivate so that we get invalidatepage callback.
		 */
		page->private = (unsigned long)snapc;
		SetPagePrivate(page);
	} else {
		dout("ANON set_page_dirty %p (raced truncate?)\n", page);
		undo = 1;
	}
	spin_unlock_irq(&mapping->tree_lock);

	if (undo)
		/* whoops, we failed to dirty the page */
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	BUG_ON(!PageDirty(page));
	return 1;
}
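
/*
 * Note the pairing implied above: every dirty page holds one
 * i_wrbuffer_ref and one reference to its snap context via
 * page->private.  Both are dropped again when the page is invalidated
 * (ceph_invalidatepage below) or cleaned by writeback
 * (writepage_nounlock/writepages_finish).
 */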

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = (void *)page->private;

	BUG_ON(!PageLocked(page));
	BUG_ON(!page->private);
	BUG_ON(!PagePrivate(page));
	BUG_ON(!page->mapping);

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	if (offset == 0)
		ClearPageChecked(page);

	ci = ceph_inode(inode);
	if (offset == 0) {
		dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
		     inode, page, page->index, offset);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
		page->private = 0;
		ClearPagePrivate(page);
	} else {
		dout("%p invalidatepage %p idx %lu partial dirty page\n",
		     inode, page, page->index);
	}
}

/* just a sanity check */
static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;

	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));
	WARN_ON(page->private);
	WARN_ON(PagePrivate(page));
	return 0;
}
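
/*
 * Read path: reads bypass the MDS and go straight to the OSDs via
 * ceph_osdc_readpages().  A short read simply means the object is
 * smaller than the requested range (e.g. at end of file), so the
 * remainder of the page is zero-filled before it is marked uptodate.
 */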

/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
	int err = 0;
	u64 len = PAGE_CACHE_SIZE;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  page->index << PAGE_CACHE_SHIFT, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		goto out;
	} else if (err < PAGE_CACHE_SIZE) {
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);

	unlock_page(page);
	return r;
}

/*
 * Build a vector of contiguous pages from the provided page list.
 */
static struct page **page_vector_from_list(struct list_head *page_list,
					   unsigned *nr_pages)
{
	struct page **pages;
	struct page *page;
	int next_index, contig_pages = 0;

	/* build page vector */
	pages = kmalloc(sizeof(*pages) * *nr_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	BUG_ON(list_empty(page_list));
	next_index = list_entry(page_list->prev, struct page, lru)->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index == next_index) {
			dout("readpages page %d %p\n", contig_pages, page);
			pages[contig_pages] = page;
			contig_pages++;
			next_index++;
		} else {
			break;
		}
	}
	*nr_pages = contig_pages;
	return pages;
}
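
/*
 * The readahead list handed to us by the VM is in reverse order, so
 * the vector above is built by walking it from the tail.  Only the
 * leading run of consecutive indices is read in a single OSD request;
 * any remaining (non-contiguous) pages stay on page_list for the
 * caller to clean up.
 */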

/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
	int rc = 0;
	struct page **pages;
	struct pagevec pvec;
	loff_t offset;
	u64 len;

	dout("readpages %p file %p nr_pages %d\n",
	     inode, file, nr_pages);

	pages = page_vector_from_list(page_list, &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* guess read extent */
	offset = pages[0]->index << PAGE_CACHE_SHIFT;
	len = nr_pages << PAGE_CACHE_SHIFT;
	rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				 offset, &len,
				 ci->i_truncate_seq, ci->i_truncate_size,
				 pages, nr_pages);
	if (rc == -ENOENT)
		rc = 0;
	if (rc < 0)
		goto out;

	/* set uptodate and add to lru in pagevec-sized chunks */
	pagevec_init(&pvec, 0);
	for (; !list_empty(page_list) && len > 0;
	     rc -= PAGE_CACHE_SIZE, len -= PAGE_CACHE_SIZE) {
		struct page *page =
			list_entry(page_list->prev, struct page, lru);

		list_del(&page->lru);

		if (rc < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = rc < 0 ? 0 : rc;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}

		if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
			page_cache_release(page);
			dout("readpages %p add_to_page_cache failed %p\n",
			     inode, page);
			continue;
		}
		dout("readpages %p adding %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (pagevec_add(&pvec, page) == 0)
			pagevec_lru_add_file(&pvec);	/* add to lru */
	}
	pagevec_lru_add_file(&pvec);
	rc = 0;

out:
	kfree(pages);
	return rc;
}

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 *
 * Caller holds i_lock.
 */
static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
						      u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_snap_realm) {
		snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	return snapc;
}

static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    u64 *snap_size)
{
	struct ceph_snap_context *snapc = NULL;

	spin_lock(&inode->i_lock);
	snapc = __get_oldest_context(inode, snap_size);
	spin_unlock(&inode->i_lock);
	return snapc;
}
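
/*
 * Writeback must happen in snap order, so a page may only be written
 * out if its snap context is the oldest one with dirty data.  A page
 * in a newer context is left dirty (a no-op below); that is only
 * legitimate when kswapd calls us opportunistically, hence the
 * PF_MEMALLOC check in writepage_nounlock().
 */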

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_osd_client *osdc;
	loff_t page_off = page->index << PAGE_CACHE_SHIFT;
	int len = PAGE_CACHE_SIZE;
	loff_t i_size;
	int err = 0;
	struct ceph_snap_context *snapc;
	u64 snap_size = 0;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	osdc = &ceph_inode_to_client(inode)->osdc;

	/* verify this is a writeable snap context */
	snapc = (void *)page->private;
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	if (snapc != get_oldest_context(inode, &snap_size)) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, (void *)page->private);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		goto out;
	}

	/* is this a partial page at end of file? */
	if (snap_size)
		i_size = snap_size;
	else
		i_size = i_size_read(inode);
	if (i_size < page_off + len)
		len = i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u\n",
	     inode, page, page->index, page_off, len);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   ci->i_truncate_seq, ci->i_truncate_size,
				   &inode->i_mtime,
				   &page, 1, 0, 0, true);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
out:
	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err = writepage_nounlock(page, wbc);

	unlock_page(page);
	return err;
}

/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}
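
/*
 * The page count computed in the completion handler below is derived
 * from the reply's byte count: (bytes + (offset & ~PAGE_CACHE_MASK) +
 * ~PAGE_CACHE_MASK) >> PAGE_CACHE_SHIFT, i.e. the byte length plus
 * the first byte's offset within its page, rounded up to whole pages.
 */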

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_reply_head *replyhead;
	struct ceph_osd_op *op;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned wrote;
	loff_t offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
	struct page *page;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control *wbc = req->r_wbc;
	__s32 rc = -EIO;
	u64 bytes = 0;

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	op = (void *)(replyhead + 1);
	rc = le32_to_cpu(replyhead->result);
	bytes = le64_to_cpu(op->extent.length);

	if (rc >= 0) {
		wrote = (bytes + (offset & ~PAGE_CACHE_MASK) + ~PAGE_CACHE_MASK)
			>> PAGE_CACHE_SHIFT;
		WARN_ON(wrote != req->r_num_pages);
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < req->r_num_pages; i++) {
		page = req->r_pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		if (i >= wrote) {
			dout("inode %p skipping page %p\n", inode, page);
			wbc->pages_skipped++;
		}
		page->private = 0;
		ClearPagePrivate(page);
		ceph_put_snap_context(snapc);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);
		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);

	ceph_release_pages(req->r_pages, req->r_num_pages);
	if (req->r_pages_from_pool)
		mempool_free(req->r_pages,
			     ceph_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(req->r_pages);
	ceph_osdc_put_request(req);
}

/*
 * allocate a page vec, either directly, or if necessary, via the
 * mempool.  we avoid the mempool if we can because req->r_num_pages
 * may be less than the maximum write size.
 */
static void alloc_page_vec(struct ceph_client *client,
			   struct ceph_osd_request *req)
{
	req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
			       GFP_NOFS);
	if (!req->r_pages) {
		req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS);
		req->r_pages_from_pool = 1;
		WARN_ON(!req->r_pages);
	}
}
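
/*
 * The writeback loop below proceeds roughly as follows: find the
 * oldest snap context with dirty data; gather a run of consecutive
 * dirty pages tagged with that same context (up to the OSD write
 * size); on the first such page, allocate an OSD request sized for a
 * full write; then trim the request to the pages actually gathered
 * and submit it asynchronously, leaving writepages_finish() to clean
 * the pages on completion.
 */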

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync;
	u64 snap_size = 0;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	do_sync = wbc->sync_mode == WB_SYNC_ALL;
	if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	client = ceph_inode_to_client(inode);
	if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepage_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (client->mount_args->wsize && client->mount_args->wsize < wsize)
		wsize = client->mount_args->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		dout(" writepages congested\n");
		wbc->encountered_congestion = 1;
		goto out_final;
	}

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);
	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout("  snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page *page;
		int want;
		u64 offset, len;
		struct ceph_osd_request_head *reqhead;
		struct ceph_osd_op *op;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if ((snap_size && page_offset(page) > snap_size) ||
			    (!snap_size &&
			     page_offset(page) > i_size_read(inode))) {
				dout("%p page eof %llu\n", page, snap_size ?
				     snap_size : i_size_read(inode));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			if (snapc != (void *)page->private) {
				dout("page snapc %p != oldest %p\n",
				     (void *)page->private, snapc);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/* ok */
			if (locked_pages == 0) {
				/* prepare async write request */
				offset = page->index << PAGE_CACHE_SHIFT;
				len = wsize;
				req = ceph_osdc_new_request(&client->osdc,
					    &ci->i_layout,
					    ceph_vino(inode),
					    offset, &len,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE |
					    CEPH_OSD_FLAG_ONDISK,
					    snapc, do_sync,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    &inode->i_mtime, true, 1);
				max_pages = req->r_num_pages;

				alloc_page_vec(client, req);
				req->r_callback = writepages_finish;
				req->r_inode = inode;
				req->r_wbc = wbc;
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);
			set_page_writeback(page);
			req->r_pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* submit the write */
		offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
		len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		/* revise final length, page count */
		req->r_num_pages = locked_pages;
		reqhead = req->r_request->front.iov_base;
		op = (void *)(reqhead + 1);
		op->extent.length = cpu_to_le64(len);
		op->payload_len = cpu_to_le32(len);
		req->r_request->hdr.data_len = cpu_to_le32(len);

		ceph_osdc_start_request(&client->osdc, req, true);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	if (rc > 0)
		rc = 0;  /* vfs expects us to return 0 */
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
out_final:
	return rc;
}
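
/*
 * In effect, a snap context is "writeable" if it is the oldest one
 * with dirty data, and "written" once every older context has been
 * flushed; either way its seq is no greater than that of the oldest
 * dirty context, which is what the comparison below tests.  If there
 * is no oldest context at all, everything has been written.
 */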

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);

	return !oldest || snapc->seq <= oldest->seq;
}
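
/*
 * write_begin handles three cases: the page is clean (or dirty in the
 * current head snap context) and can simply be used; it is dirty in
 * the oldest context, in which case it is flushed synchronously here;
 * or it is dirty in some newer-but-not-current context, in which case
 * we must unlock, kick off writeback, and wait for that context to
 * become writeable or written before retrying.
 */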

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	struct ceph_snap_context *snapc;
	int r;

retry:
	/* get a page */
	page = grab_cache_page_write_begin(mapping, index, 0);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	dout("write_begin file %p inode %p page %p %d~%d\n", file,
	     inode, page, (int)pos, (int)len);

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	/* check snap context */
	BUG_ON(!ci->i_snap_realm);
	down_read(&mdsc->snap_rwsem);
	BUG_ON(!ci->i_snap_realm->cached_context);
	if (page->private &&
	    (void *)page->private != ci->i_snap_realm->cached_context) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		snapc = get_oldest_context(inode, NULL);
		up_read(&mdsc->snap_rwsem);

		if (snapc != (void *)page->private) {
			dout(" page %p snapc %p not current or oldest\n",
			     page, (void *)page->private);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context((void *)page->private);
			unlock_page(page);
			if (ceph_queue_writeback(inode))
				igrab(inode);
			wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			goto retry;
		}

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (i_size + len > inode->i_sb->s_maxbytes) {
		/* file is too big */
		r = -EINVAL;
		goto fail;
	}

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}

	/* we need to read it. */
	up_read(&mdsc->snap_rwsem);
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;

fail:
	up_read(&mdsc->snap_rwsem);
fail_nosnap:
	unlock_page(page);
	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting and drop read lock on
 * mdsc->snap_rwsem.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	up_read(&mdsc->snap_rwsem);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};
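
/*
 * vm ops: ceph_page_mkwrite reuses ceph_write_begin for the fault
 * path and returns VM_FAULT_LOCKED on success -- the page stays
 * locked, and ceph_write_end's dirty/unlock/put work is done inline
 * instead.
 */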

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = vmf->page;
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	loff_t off = page->index << PAGE_CACHE_SHIFT;
	loff_t size, len;
	struct page *locked_page = NULL;
	void *fsdata = NULL;
	int ret;

	size = i_size_read(inode);
	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;

	dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
	     off, len, page, page->index);
	ret = ceph_write_begin(vma->vm_file, inode->i_mapping, off, len, 0,
			       &locked_page, &fsdata);
	WARN_ON(page != locked_page);
	if (!ret) {
		/*
		 * doing the following, instead of calling
		 * ceph_write_end.  Note that we keep the
		 * page locked.
		 */
		set_page_dirty(page);
		up_read(&mdsc->snap_rwsem);
		page_cache_release(page);
		ret = VM_FAULT_LOCKED;
	} else {
		ret = VM_FAULT_SIGBUS;
	}
	dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
	return ret;
}

static struct vm_operations_struct ceph_vmops = {
	.fault = filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
;