Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /** |
2 | * aops.c - NTFS kernel address space operations and page cache handling. | |
3 | * Part of the Linux-NTFS project. | |
4 | * | |
78af34f0 | 5 | * Copyright (c) 2001-2006 Anton Altaparmakov |
1da177e4 LT |
6 | * Copyright (c) 2002 Richard Russon |
7 | * | |
8 | * This program/include file is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public License as published | |
10 | * by the Free Software Foundation; either version 2 of the License, or | |
11 | * (at your option) any later version. | |
12 | * | |
13 | * This program/include file is distributed in the hope that it will be | |
14 | * useful, but WITHOUT ANY WARRANTY; without even the implied warranty | |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program (in the main directory of the Linux-NTFS | |
20 | * distribution in the file COPYING); if not, write to the Free Software | |
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
22 | */ | |
23 | ||
24 | #include <linux/errno.h> | |
78264bd9 | 25 | #include <linux/fs.h> |
1da177e4 LT |
26 | #include <linux/mm.h> |
27 | #include <linux/pagemap.h> | |
28 | #include <linux/swap.h> | |
29 | #include <linux/buffer_head.h> | |
30 | #include <linux/writeback.h> | |
b4012a98 | 31 | #include <linux/bit_spinlock.h> |
1da177e4 LT |
32 | |
33 | #include "aops.h" | |
34 | #include "attrib.h" | |
35 | #include "debug.h" | |
36 | #include "inode.h" | |
37 | #include "mft.h" | |
38 | #include "runlist.h" | |
39 | #include "types.h" | |
40 | #include "ntfs.h" | |
41 | ||
42 | /** | |
43 | * ntfs_end_buffer_async_read - async io completion for reading attributes | |
44 | * @bh: buffer head on which io is completed | |
45 | * @uptodate: whether @bh is now uptodate or not | |
46 | * | |
47 | * Asynchronous I/O completion handler for reading pages belonging to the | |
48 | * attribute address space of an inode. The inode can be a file, a | |
49 | * directory, or a fake inode describing some attribute. | |
50 | * | |
51 | * If NInoMstProtected(), perform the post read mst fixups when all IO on the | |
52 | * page has been completed and mark the page uptodate or set the error bit on | |
53 | * the page. To determine the size of the records that need fixing up, we | |
54 | * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs | |
55 | * record size, and index_block_size_bits, to the log(base 2) of the ntfs | |
56 | * record size. | |
57 | */ | |
58 | static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |
59 | { | |
1da177e4 | 60 | unsigned long flags; |
e604635c | 61 | struct buffer_head *first, *tmp; |
1da177e4 | 62 | struct page *page; |
f6098cf4 | 63 | struct inode *vi; |
1da177e4 LT |
64 | ntfs_inode *ni; |
65 | int page_uptodate = 1; | |
66 | ||
67 | page = bh->b_page; | |
f6098cf4 AA |
68 | vi = page->mapping->host; |
69 | ni = NTFS_I(vi); | |
1da177e4 LT |
70 | |
71 | if (likely(uptodate)) { | |
f6098cf4 AA |
72 | loff_t i_size; |
73 | s64 file_ofs, init_size; | |
1da177e4 LT |
74 | |
75 | set_buffer_uptodate(bh); | |
76 | ||
77 | file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + | |
78 | bh_offset(bh); | |
07a4e2da | 79 | read_lock_irqsave(&ni->size_lock, flags); |
f6098cf4 AA |
80 | init_size = ni->initialized_size; |
81 | i_size = i_size_read(vi); | |
07a4e2da | 82 | read_unlock_irqrestore(&ni->size_lock, flags); |
f6098cf4 AA |
83 | if (unlikely(init_size > i_size)) { |
84 | /* Race with shrinking truncate. */ | |
85 | init_size = i_size; | |
86 | } | |
1da177e4 | 87 | /* Check for the current buffer head overflowing. */ |
f6098cf4 AA |
88 | if (unlikely(file_ofs + bh->b_size > init_size)) { |
89 | u8 *kaddr; | |
90 | int ofs; | |
91 | ||
92 | ofs = 0; | |
93 | if (file_ofs < init_size) | |
94 | ofs = init_size - file_ofs; | |
95 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); | |
96 | memset(kaddr + bh_offset(bh) + ofs, 0, | |
97 | bh->b_size - ofs); | |
98 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | |
1da177e4 | 99 | flush_dcache_page(page); |
1da177e4 LT |
100 | } |
101 | } else { | |
102 | clear_buffer_uptodate(bh); | |
e604635c | 103 | SetPageError(page); |
f6098cf4 AA |
104 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block " |
105 | "0x%llx.", (unsigned long long)bh->b_blocknr); | |
1da177e4 | 106 | } |
e604635c AA |
107 | first = page_buffers(page); |
108 | local_irq_save(flags); | |
109 | bit_spin_lock(BH_Uptodate_Lock, &first->b_state); | |
1da177e4 LT |
110 | clear_buffer_async_read(bh); |
111 | unlock_buffer(bh); | |
112 | tmp = bh; | |
113 | do { | |
114 | if (!buffer_uptodate(tmp)) | |
115 | page_uptodate = 0; | |
116 | if (buffer_async_read(tmp)) { | |
117 | if (likely(buffer_locked(tmp))) | |
118 | goto still_busy; | |
119 | /* Async buffers must be locked. */ | |
120 | BUG(); | |
121 | } | |
122 | tmp = tmp->b_this_page; | |
123 | } while (tmp != bh); | |
e604635c AA |
124 | bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
125 | local_irq_restore(flags); | |
1da177e4 LT |
126 | /* |
127 | * If none of the buffers had errors then we can set the page uptodate, | |
128 | * but we first have to perform the post read mst fixups, if the | |
129 | * attribute is mst protected, i.e. if NInoMstProtected(ni) is true. | |
130 | * Note we ignore fixup errors as those are detected when | |
131 | * map_mft_record() is called which gives us per record granularity | |
132 | * rather than per page granularity. | |
133 | */ | |
134 | if (!NInoMstProtected(ni)) { | |
135 | if (likely(page_uptodate && !PageError(page))) | |
136 | SetPageUptodate(page); | |
137 | } else { | |
f6098cf4 | 138 | u8 *kaddr; |
1da177e4 LT |
139 | unsigned int i, recs; |
140 | u32 rec_size; | |
141 | ||
142 | rec_size = ni->itype.index.block_size; | |
143 | recs = PAGE_CACHE_SIZE / rec_size; | |
144 | /* Should have been verified before we got here... */ | |
145 | BUG_ON(!recs); | |
f6098cf4 | 146 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); |
1da177e4 | 147 | for (i = 0; i < recs; i++) |
f6098cf4 | 148 | post_read_mst_fixup((NTFS_RECORD*)(kaddr + |
1da177e4 | 149 | i * rec_size), rec_size); |
f6098cf4 | 150 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); |
1da177e4 | 151 | flush_dcache_page(page); |
b6ad6c52 | 152 | if (likely(page_uptodate && !PageError(page))) |
1da177e4 LT |
153 | SetPageUptodate(page); |
154 | } | |
155 | unlock_page(page); | |
156 | return; | |
157 | still_busy: | |
e604635c AA |
158 | bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); |
159 | local_irq_restore(flags); | |
1da177e4 LT |
160 | return; |
161 | } | |
162 | ||
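A minimal sketch of the "cheat" described in the comment above: because the ntfs record size (and its log2) is stored in ni->itype.index.block_size(_bits), the completion handler can locate and fix up every ntfs record in the page with simple arithmetic. The helper name and the example sizes are illustrative only, the code relies on the headers already included in this file, and fixup errors are ignored for the same reason the driver ignores them.

```c
/* Hypothetical helper, not part of the driver. */
static void example_fixup_all_records(ntfs_inode *ni, u8 *kaddr)
{
	u32 rec_size = ni->itype.index.block_size;
	unsigned int i, recs = PAGE_CACHE_SIZE / rec_size;

	/* E.g. 1024 byte mft records and 4096 byte pages give recs == 4. */
	for (i = 0; i < recs; i++)
		post_read_mst_fixup((NTFS_RECORD*)(kaddr + i * rec_size),
				rec_size);
}
```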
163 | /** | |
164 | * ntfs_read_block - fill a @page of an address space with data | |
165 | * @page: page cache page to fill with data | |
166 | * | |
167 | * Fill the page @page of the address space belonging to the @page->mapping->host inode. | |
168 | * We read each buffer asynchronously and when all buffers are read in, our io | |
169 | * completion handler ntfs_end_buffer_async_read(), if required, automatically | |
170 | * applies the mst fixups to the page before finally marking it uptodate and | |
171 | * unlocking it. | |
172 | * | |
173 | * We only enforce the allocated_size limit because i_size is checked for in | |
174 | * generic_file_read(). | |
175 | * | |
176 | * Return 0 on success and -errno on error. | |
177 | * | |
178 | * Contains an adapted version of fs/buffer.c::block_read_full_page(). | |
179 | */ | |
180 | static int ntfs_read_block(struct page *page) | |
181 | { | |
f6098cf4 | 182 | loff_t i_size; |
1da177e4 LT |
183 | VCN vcn; |
184 | LCN lcn; | |
f6098cf4 AA |
185 | s64 init_size; |
186 | struct inode *vi; | |
1da177e4 LT |
187 | ntfs_inode *ni; |
188 | ntfs_volume *vol; | |
189 | runlist_element *rl; | |
190 | struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; | |
191 | sector_t iblock, lblock, zblock; | |
07a4e2da | 192 | unsigned long flags; |
1da177e4 LT |
193 | unsigned int blocksize, vcn_ofs; |
194 | int i, nr; | |
195 | unsigned char blocksize_bits; | |
196 | ||
f6098cf4 AA |
197 | vi = page->mapping->host; |
198 | ni = NTFS_I(vi); | |
1da177e4 LT |
199 | vol = ni->vol; |
200 | ||
201 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ | |
202 | BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)); | |
203 | ||
78af34f0 AA |
204 | blocksize = vol->sb->s_blocksize; |
205 | blocksize_bits = vol->sb->s_blocksize_bits; | |
1da177e4 | 206 | |
a01ac532 | 207 | if (!page_has_buffers(page)) { |
1da177e4 | 208 | create_empty_buffers(page, blocksize, 0); |
a01ac532 AA |
209 | if (unlikely(!page_has_buffers(page))) { |
210 | unlock_page(page); | |
211 | return -ENOMEM; | |
212 | } | |
1da177e4 | 213 | } |
a01ac532 AA |
214 | bh = head = page_buffers(page); |
215 | BUG_ON(!bh); | |
1da177e4 | 216 | |
f6098cf4 AA |
217 | /* |
218 | * We may be racing with truncate. To avoid some of the problems we | |
219 | * now take a snapshot of the various sizes and use those for the whole | |
220 | * of the function. In case of an extending truncate it just means we | |
221 | * may leave some buffers unmapped which are now allocated. This is | |
222 | * not a problem since these buffers will just get mapped when a write | |
223 | * occurs. In case of a shrinking truncate, we will detect this later | |
224 | * on due to the runlist being incomplete and if the page is being | |
225 | * fully truncated, truncate will throw it away as soon as we unlock | |
226 | * it so no need to worry what we do with it. | |
227 | */ | |
1da177e4 | 228 | iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); |
07a4e2da | 229 | read_lock_irqsave(&ni->size_lock, flags); |
1da177e4 | 230 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; |
f6098cf4 AA |
231 | init_size = ni->initialized_size; |
232 | i_size = i_size_read(vi); | |
07a4e2da | 233 | read_unlock_irqrestore(&ni->size_lock, flags); |
f6098cf4 AA |
234 | if (unlikely(init_size > i_size)) { |
235 | /* Race with shrinking truncate. */ | |
236 | init_size = i_size; | |
237 | } | |
238 | zblock = (init_size + blocksize - 1) >> blocksize_bits; | |
1da177e4 LT |
239 | |
240 | /* Loop through all the buffers in the page. */ | |
241 | rl = NULL; | |
242 | nr = i = 0; | |
243 | do { | |
244 | u8 *kaddr; | |
8273d5d4 | 245 | int err; |
1da177e4 LT |
246 | |
247 | if (unlikely(buffer_uptodate(bh))) | |
248 | continue; | |
249 | if (unlikely(buffer_mapped(bh))) { | |
250 | arr[nr++] = bh; | |
251 | continue; | |
252 | } | |
8273d5d4 | 253 | err = 0; |
1da177e4 LT |
254 | bh->b_bdev = vol->sb->s_bdev; |
255 | /* Is the block within the allowed limits? */ | |
256 | if (iblock < lblock) { | |
c49c3111 | 257 | bool is_retry = false; |
1da177e4 LT |
258 | |
259 | /* Convert iblock into corresponding vcn and offset. */ | |
260 | vcn = (VCN)iblock << blocksize_bits >> | |
261 | vol->cluster_size_bits; | |
262 | vcn_ofs = ((VCN)iblock << blocksize_bits) & | |
263 | vol->cluster_size_mask; | |
264 | if (!rl) { | |
265 | lock_retry_remap: | |
266 | down_read(&ni->runlist.lock); | |
267 | rl = ni->runlist.rl; | |
268 | } | |
269 | if (likely(rl != NULL)) { | |
270 | /* Seek to element containing target vcn. */ | |
271 | while (rl->length && rl[1].vcn <= vcn) | |
272 | rl++; | |
273 | lcn = ntfs_rl_vcn_to_lcn(rl, vcn); | |
274 | } else | |
275 | lcn = LCN_RL_NOT_MAPPED; | |
276 | /* Successful remap. */ | |
277 | if (lcn >= 0) { | |
278 | /* Setup buffer head to correct block. */ | |
279 | bh->b_blocknr = ((lcn << vol->cluster_size_bits) | |
280 | + vcn_ofs) >> blocksize_bits; | |
281 | set_buffer_mapped(bh); | |
282 | /* Only read initialized data blocks. */ | |
283 | if (iblock < zblock) { | |
284 | arr[nr++] = bh; | |
285 | continue; | |
286 | } | |
287 | /* Fully non-initialized data block, zero it. */ | |
288 | goto handle_zblock; | |
289 | } | |
290 | /* It is a hole, need to zero it. */ | |
291 | if (lcn == LCN_HOLE) | |
292 | goto handle_hole; | |
293 | /* If first try and runlist unmapped, map and retry. */ | |
294 | if (!is_retry && lcn == LCN_RL_NOT_MAPPED) { | |
c49c3111 | 295 | is_retry = true; |
1da177e4 LT |
296 | /* |
297 | * Attempt to map runlist, dropping lock for | |
298 | * the duration. | |
299 | */ | |
300 | up_read(&ni->runlist.lock); | |
301 | err = ntfs_map_runlist(ni, vcn); | |
302 | if (likely(!err)) | |
303 | goto lock_retry_remap; | |
304 | rl = NULL; | |
9f993fe4 AA |
305 | } else if (!rl) |
306 | up_read(&ni->runlist.lock); | |
8273d5d4 AA |
307 | /* |
308 | * If buffer is outside the runlist, treat it as a | |
309 | * hole. This can happen due to concurrent truncate | |
310 | * for example. | |
311 | */ | |
312 | if (err == -ENOENT || lcn == LCN_ENOENT) { | |
313 | err = 0; | |
314 | goto handle_hole; | |
315 | } | |
1da177e4 | 316 | /* Hard error, zero out region. */ |
8273d5d4 AA |
317 | if (!err) |
318 | err = -EIO; | |
1da177e4 LT |
319 | bh->b_blocknr = -1; |
320 | SetPageError(page); | |
321 | ntfs_error(vol->sb, "Failed to read from inode 0x%lx, " | |
322 | "attribute type 0x%x, vcn 0x%llx, " | |
323 | "offset 0x%x because its location on " | |
324 | "disk could not be determined%s " | |
8273d5d4 | 325 | "(error code %i).", ni->mft_no, |
1da177e4 LT |
326 | ni->type, (unsigned long long)vcn, |
327 | vcn_ofs, is_retry ? " even after " | |
8273d5d4 | 328 | "retrying" : "", err); |
1da177e4 LT |
329 | } |
330 | /* | |
331 | * Either iblock was outside lblock limits or | |
332 | * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion | |
333 | * of the page and set the buffer uptodate. | |
334 | */ | |
335 | handle_hole: | |
336 | bh->b_blocknr = -1UL; | |
337 | clear_buffer_mapped(bh); | |
338 | handle_zblock: | |
339 | kaddr = kmap_atomic(page, KM_USER0); | |
340 | memset(kaddr + i * blocksize, 0, blocksize); | |
1da177e4 | 341 | kunmap_atomic(kaddr, KM_USER0); |
8273d5d4 AA |
342 | flush_dcache_page(page); |
343 | if (likely(!err)) | |
344 | set_buffer_uptodate(bh); | |
1da177e4 LT |
345 | } while (i++, iblock++, (bh = bh->b_this_page) != head); |
346 | ||
347 | /* Release the lock if we took it. */ | |
348 | if (rl) | |
349 | up_read(&ni->runlist.lock); | |
350 | ||
351 | /* Check we have at least one buffer ready for i/o. */ | |
352 | if (nr) { | |
353 | struct buffer_head *tbh; | |
354 | ||
355 | /* Lock the buffers. */ | |
356 | for (i = 0; i < nr; i++) { | |
357 | tbh = arr[i]; | |
358 | lock_buffer(tbh); | |
359 | tbh->b_end_io = ntfs_end_buffer_async_read; | |
360 | set_buffer_async_read(tbh); | |
361 | } | |
362 | /* Finally, start i/o on the buffers. */ | |
363 | for (i = 0; i < nr; i++) { | |
364 | tbh = arr[i]; | |
365 | if (likely(!buffer_uptodate(tbh))) | |
366 | submit_bh(READ, tbh); | |
367 | else | |
368 | ntfs_end_buffer_async_read(tbh, 1); | |
369 | } | |
370 | return 0; | |
371 | } | |
372 | /* No i/o was scheduled on any of the buffers. */ | |
373 | if (likely(!PageError(page))) | |
374 | SetPageUptodate(page); | |
375 | else /* Signal synchronous i/o error. */ | |
376 | nr = -EIO; | |
377 | unlock_page(page); | |
378 | return nr; | |
379 | } | |
380 | ||
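As a worked example of the block mapping arithmetic in the loop above, here is a sketch under assumed geometry (512 byte blocks, 4096 byte clusters). The helper name and the concrete numbers are illustrative only; it omits the map-and-retry handling for unmapped runlists and assumes the caller holds ni->runlist.lock for reading.

```c
/* Hypothetical helper, not part of the driver. */
static s64 example_map_block(ntfs_inode *ni, sector_t iblock,
		unsigned char blocksize_bits)
{
	ntfs_volume *vol = ni->vol;
	/* Split the block's byte offset into a vcn and an offset within it. */
	VCN vcn = (VCN)iblock << blocksize_bits >> vol->cluster_size_bits;
	unsigned int vcn_ofs = ((VCN)iblock << blocksize_bits) &
			vol->cluster_size_mask;
	LCN lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);

	if (lcn < 0)
		return lcn;	/* Hole, not mapped, or out of bounds. */
	/*
	 * E.g. iblock 37 is byte 0x4a00, i.e. vcn 4 at offset 0xa00; with
	 * lcn 0x64 the device block is (0x64000 + 0xa00) >> 9 = 0x325.
	 */
	return ((lcn << vol->cluster_size_bits) + vcn_ofs) >> blocksize_bits;
}
```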
381 | /** | |
382 | * ntfs_readpage - fill a @page of a @file with data from the device | |
383 | * @file: open file to which the page @page belongs or NULL | |
384 | * @page: page cache page to fill with data | |
385 | * | |
386 | * For non-resident attributes, ntfs_readpage() fills the @page of the open | |
387 | * file @file by calling the ntfs version of the generic block_read_full_page() | |
388 | * function, ntfs_read_block(), which in turn creates and reads in the buffers | |
389 | * associated with the page asynchronously. | |
390 | * | |
391 | * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the | |
392 | * data from the mft record (which at this stage is most likely in memory) and | |
393 | * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as | |
394 | * even if the mft record is not cached at this point in time, we need to wait | |
395 | * for it to be read in before we can do the copy. | |
396 | * | |
397 | * Return 0 on success and -errno on error. | |
398 | */ | |
399 | static int ntfs_readpage(struct file *file, struct page *page) | |
400 | { | |
f6098cf4 AA |
401 | loff_t i_size; |
402 | struct inode *vi; | |
1da177e4 LT |
403 | ntfs_inode *ni, *base_ni; |
404 | u8 *kaddr; | |
405 | ntfs_attr_search_ctx *ctx; | |
406 | MFT_RECORD *mrec; | |
b6ad6c52 | 407 | unsigned long flags; |
1da177e4 LT |
408 | u32 attr_len; |
409 | int err = 0; | |
410 | ||
905685f6 | 411 | retry_readpage: |
1da177e4 LT |
412 | BUG_ON(!PageLocked(page)); |
413 | /* | |
414 | * This can potentially happen because we clear PageUptodate() during | |
415 | * ntfs_writepage() of MstProtected() attributes. | |
416 | */ | |
417 | if (PageUptodate(page)) { | |
418 | unlock_page(page); | |
419 | return 0; | |
420 | } | |
f6098cf4 AA |
421 | vi = page->mapping->host; |
422 | ni = NTFS_I(vi); | |
311120ec AA |
423 | /* |
424 | * Only $DATA attributes can be encrypted and only unnamed $DATA | |
425 | * attributes can be compressed. Index root can have the flags set, but | |
426 | * there this only means that newly created files are to be compressed or | |
4e64c886 AA |
427 | * encrypted, not that the attribute itself is. Note we need to check for | |
428 | * AT_INDEX_ALLOCATION since this is the type of both directory and | |
429 | * index inodes. | |
311120ec | 430 | */ |
4e64c886 | 431 | if (ni->type != AT_INDEX_ALLOCATION) { |
311120ec AA |
432 | /* If attribute is encrypted, deny access, just like NT4. */ |
433 | if (NInoEncrypted(ni)) { | |
434 | BUG_ON(ni->type != AT_DATA); | |
435 | err = -EACCES; | |
436 | goto err_out; | |
437 | } | |
438 | /* Compressed data streams are handled in compress.c. */ | |
439 | if (NInoNonResident(ni) && NInoCompressed(ni)) { | |
440 | BUG_ON(ni->type != AT_DATA); | |
441 | BUG_ON(ni->name_len); | |
442 | return ntfs_read_compressed_block(page); | |
443 | } | |
444 | } | |
1da177e4 LT |
445 | /* NInoNonResident() == NInoIndexAllocPresent() */ |
446 | if (NInoNonResident(ni)) { | |
311120ec | 447 | /* Normal, non-resident data stream. */ |
1da177e4 LT |
448 | return ntfs_read_block(page); |
449 | } | |
450 | /* | |
451 | * Attribute is resident, implying it is not compressed or encrypted. | |
452 | * This also means the attribute is smaller than an mft record and | |
453 | * hence smaller than a page, so can simply zero out any pages with | |
311120ec AA |
454 | * index above 0. Note the attribute can actually be marked compressed |
455 | * but if it is resident the actual data is not compressed so we are | |
456 | * ok to ignore the compressed flag here. | |
1da177e4 | 457 | */ |
b6ad6c52 | 458 | if (unlikely(page->index > 0)) { |
1da177e4 LT |
459 | kaddr = kmap_atomic(page, KM_USER0); |
460 | memset(kaddr, 0, PAGE_CACHE_SIZE); | |
461 | flush_dcache_page(page); | |
462 | kunmap_atomic(kaddr, KM_USER0); | |
463 | goto done; | |
464 | } | |
465 | if (!NInoAttr(ni)) | |
466 | base_ni = ni; | |
467 | else | |
468 | base_ni = ni->ext.base_ntfs_ino; | |
469 | /* Map, pin, and lock the mft record. */ | |
470 | mrec = map_mft_record(base_ni); | |
471 | if (IS_ERR(mrec)) { | |
472 | err = PTR_ERR(mrec); | |
473 | goto err_out; | |
474 | } | |
905685f6 AA |
475 | /* |
476 | * If a parallel write made the attribute non-resident, drop the mft | |
477 | * record and retry the readpage. | |
478 | */ | |
479 | if (unlikely(NInoNonResident(ni))) { | |
480 | unmap_mft_record(base_ni); | |
481 | goto retry_readpage; | |
482 | } | |
1da177e4 LT |
483 | ctx = ntfs_attr_get_search_ctx(base_ni, mrec); |
484 | if (unlikely(!ctx)) { | |
485 | err = -ENOMEM; | |
486 | goto unm_err_out; | |
487 | } | |
488 | err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, | |
489 | CASE_SENSITIVE, 0, NULL, 0, ctx); | |
490 | if (unlikely(err)) | |
491 | goto put_unm_err_out; | |
492 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); | |
b6ad6c52 AA |
493 | read_lock_irqsave(&ni->size_lock, flags); |
494 | if (unlikely(attr_len > ni->initialized_size)) | |
495 | attr_len = ni->initialized_size; | |
f6098cf4 | 496 | i_size = i_size_read(vi); |
b6ad6c52 | 497 | read_unlock_irqrestore(&ni->size_lock, flags); |
f6098cf4 AA |
498 | if (unlikely(attr_len > i_size)) { |
499 | /* Race with shrinking truncate. */ | |
500 | attr_len = i_size; | |
501 | } | |
1da177e4 LT |
502 | kaddr = kmap_atomic(page, KM_USER0); |
503 | /* Copy the data to the page. */ | |
504 | memcpy(kaddr, (u8*)ctx->attr + | |
505 | le16_to_cpu(ctx->attr->data.resident.value_offset), | |
506 | attr_len); | |
507 | /* Zero the remainder of the page. */ | |
508 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); | |
509 | flush_dcache_page(page); | |
510 | kunmap_atomic(kaddr, KM_USER0); | |
511 | put_unm_err_out: | |
512 | ntfs_attr_put_search_ctx(ctx); | |
513 | unm_err_out: | |
514 | unmap_mft_record(base_ni); | |
515 | done: | |
516 | SetPageUptodate(page); | |
517 | err_out: | |
518 | unlock_page(page); | |
519 | return err; | |
520 | } | |
521 | ||
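For context, most in-kernel users reach ntfs_readpage() through the page cache rather than by calling it directly. The sketch below shows that pattern; it loosely mirrors the ntfs_map_page() helper in aops.h, but the function name and the trimmed error handling here are illustrative, not the driver's code.

```c
/* Hypothetical helper, not part of the driver. */
static struct page *example_read_attr_page(struct inode *vi, pgoff_t index)
{
	/* This ends up in mapping->a_ops->readpage(), i.e. ntfs_readpage(). */
	struct page *page = read_mapping_page(vi->i_mapping, index, NULL);

	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page) || PageError(page)) {
			page_cache_release(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
```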
522 | #ifdef NTFS_RW | |
523 | ||
524 | /** | |
525 | * ntfs_write_block - write a @page to the backing store | |
526 | * @page: page cache page to write out | |
527 | * @wbc: writeback control structure | |
528 | * | |
529 | * This function is for writing pages belonging to non-resident, non-mst | |
530 | * protected attributes to their backing store. | |
531 | * | |
532 | * For a page with buffers, map and write the dirty buffers asynchronously | |
533 | * under page writeback. For a page without buffers, create buffers for the | |
534 | * page, then proceed as above. | |
535 | * | |
536 | * If a page doesn't have buffers the page dirty state is definitive. If a page | |
537 | * does have buffers, the page dirty state is just a hint, and the buffer dirty | |
538 | * state is definitive. (A hint which has rules: dirty buffers against a clean | |
539 | * page is illegal. Other combinations are legal and need to be handled. In | |
540 | * particular, a dirty page may contain clean buffers.) | |
541 | * | |
542 | * Return 0 on success and -errno on error. | |
543 | * | |
544 | * Based on ntfs_read_block() and __block_write_full_page(). | |
545 | */ | |
546 | static int ntfs_write_block(struct page *page, struct writeback_control *wbc) | |
547 | { | |
548 | VCN vcn; | |
549 | LCN lcn; | |
07a4e2da AA |
550 | s64 initialized_size; |
551 | loff_t i_size; | |
1da177e4 LT |
552 | sector_t block, dblock, iblock; |
553 | struct inode *vi; | |
554 | ntfs_inode *ni; | |
555 | ntfs_volume *vol; | |
556 | runlist_element *rl; | |
557 | struct buffer_head *bh, *head; | |
07a4e2da | 558 | unsigned long flags; |
1da177e4 LT |
559 | unsigned int blocksize, vcn_ofs; |
560 | int err; | |
c49c3111 | 561 | bool need_end_writeback; |
1da177e4 LT |
562 | unsigned char blocksize_bits; |
563 | ||
564 | vi = page->mapping->host; | |
565 | ni = NTFS_I(vi); | |
566 | vol = ni->vol; | |
567 | ||
568 | ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " | |
569 | "0x%lx.", ni->mft_no, ni->type, page->index); | |
570 | ||
571 | BUG_ON(!NInoNonResident(ni)); | |
572 | BUG_ON(NInoMstProtected(ni)); | |
78af34f0 AA |
573 | blocksize = vol->sb->s_blocksize; |
574 | blocksize_bits = vol->sb->s_blocksize_bits; | |
1da177e4 LT |
575 | if (!page_has_buffers(page)) { |
576 | BUG_ON(!PageUptodate(page)); | |
577 | create_empty_buffers(page, blocksize, | |
578 | (1 << BH_Uptodate) | (1 << BH_Dirty)); | |
a01ac532 AA |
579 | if (unlikely(!page_has_buffers(page))) { |
580 | ntfs_warning(vol->sb, "Error allocating page " | |
581 | "buffers. Redirtying page so we try " | |
582 | "again later."); | |
583 | /* | |
584 | * Put the page back on mapping->dirty_pages, but leave | |
585 | * its buffers' dirty state as-is. | |
586 | */ | |
587 | redirty_page_for_writepage(wbc, page); | |
588 | unlock_page(page); | |
589 | return 0; | |
590 | } | |
1da177e4 LT |
591 | } |
592 | bh = head = page_buffers(page); | |
a01ac532 | 593 | BUG_ON(!bh); |
1da177e4 LT |
594 | |
595 | /* NOTE: Different naming scheme to ntfs_read_block()! */ | |
596 | ||
597 | /* The first block in the page. */ | |
598 | block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); | |
599 | ||
07a4e2da AA |
600 | read_lock_irqsave(&ni->size_lock, flags); |
601 | i_size = i_size_read(vi); | |
602 | initialized_size = ni->initialized_size; | |
603 | read_unlock_irqrestore(&ni->size_lock, flags); | |
604 | ||
1da177e4 | 605 | /* The first out of bounds block for the data size. */ |
07a4e2da | 606 | dblock = (i_size + blocksize - 1) >> blocksize_bits; |
1da177e4 LT |
607 | |
608 | /* The last (fully or partially) initialized block. */ | |
07a4e2da | 609 | iblock = initialized_size >> blocksize_bits; |
1da177e4 LT |
610 | |
611 | /* | |
612 | * Be very careful. We have no exclusion from __set_page_dirty_buffers | |
613 | * here, and the (potentially unmapped) buffers may become dirty at | |
614 | * any time. If a buffer becomes dirty here after we've inspected it | |
615 | * then we just miss that fact, and the page stays dirty. | |
616 | * | |
617 | * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; | |
618 | * handle that here by just cleaning them. | |
619 | */ | |
620 | ||
621 | /* | |
622 | * Loop through all the buffers in the page, mapping all the dirty | |
623 | * buffers to disk addresses and handling any aliases from the | |
624 | * underlying block device's mapping. | |
625 | */ | |
626 | rl = NULL; | |
627 | err = 0; | |
628 | do { | |
c49c3111 | 629 | bool is_retry = false; |
1da177e4 LT |
630 | |
631 | if (unlikely(block >= dblock)) { | |
632 | /* | |
633 | * Mapped buffers outside i_size will occur, because | |
634 | * this page can be outside i_size when there is a | |
635 | * truncate in progress. The contents of such buffers | |
636 | * were zeroed by ntfs_writepage(). | |
637 | * | |
638 | * FIXME: What about the small race window where | |
639 | * ntfs_writepage() has not done any clearing because | |
640 | * the page was within i_size but before we get here, | |
641 | * vmtruncate() modifies i_size? | |
642 | */ | |
643 | clear_buffer_dirty(bh); | |
644 | set_buffer_uptodate(bh); | |
645 | continue; | |
646 | } | |
647 | ||
648 | /* Clean buffers are not written out, so no need to map them. */ | |
649 | if (!buffer_dirty(bh)) | |
650 | continue; | |
651 | ||
652 | /* Make sure we have enough initialized size. */ | |
653 | if (unlikely((block >= iblock) && | |
07a4e2da | 654 | (initialized_size < i_size))) { |
1da177e4 LT |
655 | /* |
656 | * If this page is fully outside initialized size, zero | |
657 | * out all pages between the current initialized size | |
658 | * and the current page. Just use ntfs_readpage() to do | |
659 | * the zeroing transparently. | |
660 | */ | |
661 | if (block > iblock) { | |
662 | // TODO: | |
663 | // For each page do: | |
664 | // - read_cache_page() | |
665 | // Again for each page do: | |
666 | // - wait_on_page_locked() | |
667 | // - Check (PageUptodate(page) && | |
668 | // !PageError(page)) | |
669 | // Update initialized size in the attribute and | |
670 | // in the inode. | |
671 | // Again, for each page do: | |
672 | // __set_page_dirty_buffers(); | |
673 | // page_cache_release() | |
674 | // We don't need to wait on the writes. | |
675 | // Update iblock. | |
676 | } | |
677 | /* | |
678 | * The current page straddles initialized size. Zero | |
679 | * all non-uptodate buffers and set them uptodate (and | |
680 | * dirty?). Note, there aren't any non-uptodate buffers | |
681 | * if the page is uptodate. | |
682 | * FIXME: For an uptodate page, the buffers may need to | |
683 | * be written out because they were not initialized on | |
684 | * disk before. | |
685 | */ | |
686 | if (!PageUptodate(page)) { | |
687 | // TODO: | |
688 | // Zero any non-uptodate buffers up to i_size. | |
689 | // Set them uptodate and dirty. | |
690 | } | |
691 | // TODO: | |
692 | // Update initialized size in the attribute and in the | |
693 | // inode (up to i_size). | |
694 | // Update iblock. | |
695 | // FIXME: This is inefficient. Try to batch the two | |
696 | // size changes to happen in one go. | |
697 | ntfs_error(vol->sb, "Writing beyond initialized size " | |
698 | "is not supported yet. Sorry."); | |
699 | err = -EOPNOTSUPP; | |
700 | break; | |
701 | // Do NOT set_buffer_new() BUT DO clear buffer range | |
702 | // outside write request range. | |
703 | // set_buffer_uptodate() on complete buffers as well as | |
704 | // set_buffer_dirty(). | |
705 | } | |
706 | ||
707 | /* No need to map buffers that are already mapped. */ | |
708 | if (buffer_mapped(bh)) | |
709 | continue; | |
710 | ||
711 | /* Unmapped, dirty buffer. Need to map it. */ | |
712 | bh->b_bdev = vol->sb->s_bdev; | |
713 | ||
714 | /* Convert block into corresponding vcn and offset. */ | |
715 | vcn = (VCN)block << blocksize_bits; | |
716 | vcn_ofs = vcn & vol->cluster_size_mask; | |
717 | vcn >>= vol->cluster_size_bits; | |
718 | if (!rl) { | |
719 | lock_retry_remap: | |
720 | down_read(&ni->runlist.lock); | |
721 | rl = ni->runlist.rl; | |
722 | } | |
723 | if (likely(rl != NULL)) { | |
724 | /* Seek to element containing target vcn. */ | |
725 | while (rl->length && rl[1].vcn <= vcn) | |
726 | rl++; | |
727 | lcn = ntfs_rl_vcn_to_lcn(rl, vcn); | |
728 | } else | |
729 | lcn = LCN_RL_NOT_MAPPED; | |
730 | /* Successful remap. */ | |
731 | if (lcn >= 0) { | |
732 | /* Setup buffer head to point to correct block. */ | |
733 | bh->b_blocknr = ((lcn << vol->cluster_size_bits) + | |
734 | vcn_ofs) >> blocksize_bits; | |
735 | set_buffer_mapped(bh); | |
736 | continue; | |
737 | } | |
738 | /* It is a hole, need to instantiate it. */ | |
739 | if (lcn == LCN_HOLE) { | |
8dcdebaf AA |
740 | u8 *kaddr; |
741 | unsigned long *bpos, *bend; | |
742 | ||
743 | /* Check if the buffer is zero. */ | |
744 | kaddr = kmap_atomic(page, KM_USER0); | |
745 | bpos = (unsigned long *)(kaddr + bh_offset(bh)); | |
746 | bend = (unsigned long *)((u8*)bpos + blocksize); | |
747 | do { | |
748 | if (unlikely(*bpos)) | |
749 | break; | |
750 | } while (likely(++bpos < bend)); | |
751 | kunmap_atomic(kaddr, KM_USER0); | |
752 | if (bpos == bend) { | |
753 | /* | |
754 | * Buffer is zero and sparse, no need to write | |
755 | * it. | |
756 | */ | |
757 | bh->b_blocknr = -1; | |
758 | clear_buffer_dirty(bh); | |
759 | continue; | |
760 | } | |
1da177e4 LT |
761 | // TODO: Instantiate the hole. |
762 | // clear_buffer_new(bh); | |
763 | // unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); | |
764 | ntfs_error(vol->sb, "Writing into sparse regions is " | |
765 | "not supported yet. Sorry."); | |
766 | err = -EOPNOTSUPP; | |
767 | break; | |
768 | } | |
769 | /* If first try and runlist unmapped, map and retry. */ | |
770 | if (!is_retry && lcn == LCN_RL_NOT_MAPPED) { | |
c49c3111 | 771 | is_retry = true; |
1da177e4 LT |
772 | /* |
773 | * Attempt to map runlist, dropping lock for | |
774 | * the duration. | |
775 | */ | |
776 | up_read(&ni->runlist.lock); | |
777 | err = ntfs_map_runlist(ni, vcn); | |
778 | if (likely(!err)) | |
779 | goto lock_retry_remap; | |
780 | rl = NULL; | |
9f993fe4 AA |
781 | } else if (!rl) |
782 | up_read(&ni->runlist.lock); | |
8273d5d4 AA |
783 | /* |
784 | * If buffer is outside the runlist, truncate has cut it out | |
785 | * of the runlist. Just clean and clear the buffer and set it | |
786 | * uptodate so it can get discarded by the VM. | |
787 | */ | |
788 | if (err == -ENOENT || lcn == LCN_ENOENT) { | |
789 | u8 *kaddr; | |
790 | ||
791 | bh->b_blocknr = -1; | |
792 | clear_buffer_dirty(bh); | |
793 | kaddr = kmap_atomic(page, KM_USER0); | |
794 | memset(kaddr + bh_offset(bh), 0, blocksize); | |
795 | kunmap_atomic(kaddr, KM_USER0); | |
796 | flush_dcache_page(page); | |
797 | set_buffer_uptodate(bh); | |
798 | err = 0; | |
799 | continue; | |
800 | } | |
1da177e4 | 801 | /* Failed to map the buffer, even after retrying. */ |
8273d5d4 AA |
802 | if (!err) |
803 | err = -EIO; | |
1da177e4 LT |
804 | bh->b_blocknr = -1; |
805 | ntfs_error(vol->sb, "Failed to write to inode 0x%lx, " | |
806 | "attribute type 0x%x, vcn 0x%llx, offset 0x%x " | |
807 | "because its location on disk could not be " | |
8273d5d4 | 808 | "determined%s (error code %i).", ni->mft_no, |
1da177e4 LT |
809 | ni->type, (unsigned long long)vcn, |
810 | vcn_ofs, is_retry ? " even after " | |
8273d5d4 | 811 | "retrying" : "", err); |
1da177e4 LT |
812 | break; |
813 | } while (block++, (bh = bh->b_this_page) != head); | |
814 | ||
815 | /* Release the lock if we took it. */ | |
816 | if (rl) | |
817 | up_read(&ni->runlist.lock); | |
818 | ||
819 | /* For the error case, need to reset bh to the beginning. */ | |
820 | bh = head; | |
821 | ||
54b02eb0 | 822 | /* Just an optimization, so ->readpage() is not called later. */ |
1da177e4 LT |
823 | if (unlikely(!PageUptodate(page))) { |
824 | int uptodate = 1; | |
825 | do { | |
826 | if (!buffer_uptodate(bh)) { | |
827 | uptodate = 0; | |
828 | bh = head; | |
829 | break; | |
830 | } | |
831 | } while ((bh = bh->b_this_page) != head); | |
832 | if (uptodate) | |
833 | SetPageUptodate(page); | |
834 | } | |
835 | ||
836 | /* Setup all mapped, dirty buffers for async write i/o. */ | |
837 | do { | |
1da177e4 LT |
838 | if (buffer_mapped(bh) && buffer_dirty(bh)) { |
839 | lock_buffer(bh); | |
840 | if (test_clear_buffer_dirty(bh)) { | |
841 | BUG_ON(!buffer_uptodate(bh)); | |
842 | mark_buffer_async_write(bh); | |
843 | } else | |
844 | unlock_buffer(bh); | |
845 | } else if (unlikely(err)) { | |
846 | /* | |
847 | * For the error case. The buffer may have been set | |
848 | * dirty during attachment to a dirty page. | |
849 | */ | |
850 | if (err != -ENOMEM) | |
851 | clear_buffer_dirty(bh); | |
852 | } | |
853 | } while ((bh = bh->b_this_page) != head); | |
854 | ||
855 | if (unlikely(err)) { | |
856 | // TODO: Remove the -EOPNOTSUPP check later on... | |
857 | if (unlikely(err == -EOPNOTSUPP)) | |
858 | err = 0; | |
859 | else if (err == -ENOMEM) { | |
860 | ntfs_warning(vol->sb, "Error allocating memory. " | |
861 | "Redirtying page so we try again " | |
862 | "later."); | |
863 | /* | |
864 | * Put the page back on mapping->dirty_pages, but | |
865 | * leave its buffer's dirty state as-is. | |
866 | */ | |
867 | redirty_page_for_writepage(wbc, page); | |
868 | err = 0; | |
869 | } else | |
870 | SetPageError(page); | |
871 | } | |
872 | ||
873 | BUG_ON(PageWriteback(page)); | |
874 | set_page_writeback(page); /* Keeps try_to_free_buffers() away. */ | |
1da177e4 | 875 | |
54b02eb0 | 876 | /* Submit the prepared buffers for i/o. */ |
c49c3111 | 877 | need_end_writeback = true; |
1da177e4 LT |
878 | do { |
879 | struct buffer_head *next = bh->b_this_page; | |
880 | if (buffer_async_write(bh)) { | |
881 | submit_bh(WRITE, bh); | |
c49c3111 | 882 | need_end_writeback = false; |
1da177e4 | 883 | } |
1da177e4 LT |
884 | bh = next; |
885 | } while (bh != head); | |
54b02eb0 | 886 | unlock_page(page); |
1da177e4 LT |
887 | |
888 | /* If no i/o was started, need to end_page_writeback(). */ | |
889 | if (unlikely(need_end_writeback)) | |
890 | end_page_writeback(page); | |
891 | ||
892 | ntfs_debug("Done."); | |
893 | return err; | |
894 | } | |
895 | ||
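The LCN_HOLE case in ntfs_write_block() above only leaves a hole un-instantiated when the buffer contains nothing but zeroes, which it determines with a word-by-word scan. Pulled out as a standalone helper, the check looks roughly like this (the helper name is illustrative; the logic mirrors the code above):

```c
/* Hypothetical helper, not part of the driver. */
static bool example_buffer_is_zero(struct page *page, struct buffer_head *bh,
		unsigned int blocksize)
{
	u8 *kaddr = kmap_atomic(page, KM_USER0);
	unsigned long *bpos = (unsigned long*)(kaddr + bh_offset(bh));
	unsigned long *bend = (unsigned long*)((u8*)bpos + blocksize);
	bool is_zero = true;

	do {
		if (unlikely(*bpos)) {
			is_zero = false;
			break;
		}
	} while (likely(++bpos < bend));
	kunmap_atomic(kaddr, KM_USER0);
	return is_zero;
}
```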
896 | /** | |
897 | * ntfs_write_mst_block - write a @page to the backing store | |
898 | * @page: page cache page to write out | |
899 | * @wbc: writeback control structure | |
900 | * | |
901 | * This function is for writing pages belonging to non-resident, mst protected | |
902 | * attributes to their backing store. The only supported attributes are index | |
903 | * allocation and $MFT/$DATA. Both directory inodes and index inodes are | |
904 | * supported for the index allocation case. | |
905 | * | |
906 | * The page must remain locked for the duration of the write because we apply | |
907 | * the mst fixups, write, and then undo the fixups, so if we were to unlock the | |
908 | * page before undoing the fixups, any other user of the page will see the | |
909 | * page contents as corrupt. | |
910 | * | |
911 | * We clear the page uptodate flag for the duration of the function to ensure | |
912 | * exclusion for the $MFT/$DATA case against someone mapping an mft record we | |
913 | * are about to apply the mst fixups to. | |
914 | * | |
915 | * Return 0 on success and -errno on error. | |
916 | * | |
917 | * Based on ntfs_write_block(), ntfs_mft_writepage(), and | |
918 | * write_mft_record_nolock(). | |
919 | */ | |
920 | static int ntfs_write_mst_block(struct page *page, | |
921 | struct writeback_control *wbc) | |
922 | { | |
923 | sector_t block, dblock, rec_block; | |
924 | struct inode *vi = page->mapping->host; | |
925 | ntfs_inode *ni = NTFS_I(vi); | |
926 | ntfs_volume *vol = ni->vol; | |
927 | u8 *kaddr; | |
1da177e4 LT |
928 | unsigned int rec_size = ni->itype.index.block_size; |
929 | ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; | |
930 | struct buffer_head *bh, *head, *tbh, *rec_start_bh; | |
d53ee322 | 931 | struct buffer_head *bhs[MAX_BUF_PER_PAGE]; |
1da177e4 | 932 | runlist_element *rl; |
d53ee322 AA |
933 | int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2; |
934 | unsigned bh_size, rec_size_bits; | |
c49c3111 | 935 | bool sync, is_mft, page_is_dirty, rec_is_dirty; |
d53ee322 | 936 | unsigned char bh_size_bits; |
1da177e4 LT |
937 | |
938 | ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " | |
939 | "0x%lx.", vi->i_ino, ni->type, page->index); | |
940 | BUG_ON(!NInoNonResident(ni)); | |
941 | BUG_ON(!NInoMstProtected(ni)); | |
942 | is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino); | |
943 | /* | |
944 | * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page | |
945 | * in its page cache were to be marked dirty. However this should | |
946 | * never happen with the current driver and considering we do not | |
947 | * handle this case here we do want to BUG(), at least for now. | |
948 | */ | |
949 | BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) || | |
950 | (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); | |
78af34f0 AA |
951 | bh_size = vol->sb->s_blocksize; |
952 | bh_size_bits = vol->sb->s_blocksize_bits; | |
d53ee322 | 953 | max_bhs = PAGE_CACHE_SIZE / bh_size; |
1da177e4 | 954 | BUG_ON(!max_bhs); |
d53ee322 | 955 | BUG_ON(max_bhs > MAX_BUF_PER_PAGE); |
1da177e4 LT |
956 | |
957 | /* Were we called for sync purposes? */ | |
958 | sync = (wbc->sync_mode == WB_SYNC_ALL); | |
959 | ||
960 | /* Make sure we have mapped buffers. */ | |
1da177e4 LT |
961 | bh = head = page_buffers(page); |
962 | BUG_ON(!bh); | |
963 | ||
964 | rec_size_bits = ni->itype.index.block_size_bits; | |
965 | BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); | |
966 | bhs_per_rec = rec_size >> bh_size_bits; | |
967 | BUG_ON(!bhs_per_rec); | |
968 | ||
969 | /* The first block in the page. */ | |
970 | rec_block = block = (sector_t)page->index << | |
971 | (PAGE_CACHE_SHIFT - bh_size_bits); | |
972 | ||
973 | /* The first out of bounds block for the data size. */ | |
07a4e2da | 974 | dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; |
1da177e4 LT |
975 | |
976 | rl = NULL; | |
977 | err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0; | |
c49c3111 | 978 | page_is_dirty = rec_is_dirty = false; |
1da177e4 LT |
979 | rec_start_bh = NULL; |
980 | do { | |
c49c3111 | 981 | bool is_retry = false; |
1da177e4 LT |
982 | |
983 | if (likely(block < rec_block)) { | |
984 | if (unlikely(block >= dblock)) { | |
985 | clear_buffer_dirty(bh); | |
946929d8 | 986 | set_buffer_uptodate(bh); |
1da177e4 LT |
987 | continue; |
988 | } | |
989 | /* | |
990 | * This block is not the first one in the record. We | |
991 | * ignore the buffer's dirty state because we could | |
992 | * have raced with a parallel mark_ntfs_record_dirty(). | |
993 | */ | |
994 | if (!rec_is_dirty) | |
995 | continue; | |
996 | if (unlikely(err2)) { | |
997 | if (err2 != -ENOMEM) | |
998 | clear_buffer_dirty(bh); | |
999 | continue; | |
1000 | } | |
1001 | } else /* if (block == rec_block) */ { | |
1002 | BUG_ON(block > rec_block); | |
1003 | /* This block is the first one in the record. */ | |
1004 | rec_block += bhs_per_rec; | |
1005 | err2 = 0; | |
1006 | if (unlikely(block >= dblock)) { | |
1007 | clear_buffer_dirty(bh); | |
1008 | continue; | |
1009 | } | |
1010 | if (!buffer_dirty(bh)) { | |
1011 | /* Clean records are not written out. */ | |
c49c3111 | 1012 | rec_is_dirty = false; |
1da177e4 LT |
1013 | continue; |
1014 | } | |
c49c3111 | 1015 | rec_is_dirty = true; |
1da177e4 LT |
1016 | rec_start_bh = bh; |
1017 | } | |
1018 | /* Need to map the buffer if it is not mapped already. */ | |
1019 | if (unlikely(!buffer_mapped(bh))) { | |
1020 | VCN vcn; | |
1021 | LCN lcn; | |
1022 | unsigned int vcn_ofs; | |
1023 | ||
481d0374 | 1024 | bh->b_bdev = vol->sb->s_bdev; |
1da177e4 LT |
1025 | /* Obtain the vcn and offset of the current block. */ |
1026 | vcn = (VCN)block << bh_size_bits; | |
1027 | vcn_ofs = vcn & vol->cluster_size_mask; | |
1028 | vcn >>= vol->cluster_size_bits; | |
1029 | if (!rl) { | |
1030 | lock_retry_remap: | |
1031 | down_read(&ni->runlist.lock); | |
1032 | rl = ni->runlist.rl; | |
1033 | } | |
1034 | if (likely(rl != NULL)) { | |
1035 | /* Seek to element containing target vcn. */ | |
1036 | while (rl->length && rl[1].vcn <= vcn) | |
1037 | rl++; | |
1038 | lcn = ntfs_rl_vcn_to_lcn(rl, vcn); | |
1039 | } else | |
1040 | lcn = LCN_RL_NOT_MAPPED; | |
1041 | /* Successful remap. */ | |
1042 | if (likely(lcn >= 0)) { | |
1043 | /* Setup buffer head to correct block. */ | |
1044 | bh->b_blocknr = ((lcn << | |
1045 | vol->cluster_size_bits) + | |
1046 | vcn_ofs) >> bh_size_bits; | |
1047 | set_buffer_mapped(bh); | |
1048 | } else { | |
1049 | /* | |
1050 | * Remap failed. Retry to map the runlist once | |
1051 | * unless we are working on $MFT which always | |
1052 | * has the whole of its runlist in memory. | |
1053 | */ | |
1054 | if (!is_mft && !is_retry && | |
1055 | lcn == LCN_RL_NOT_MAPPED) { | |
c49c3111 | 1056 | is_retry = true; |
1da177e4 LT |
1057 | /* |
1058 | * Attempt to map runlist, dropping | |
1059 | * lock for the duration. | |
1060 | */ | |
1061 | up_read(&ni->runlist.lock); | |
1062 | err2 = ntfs_map_runlist(ni, vcn); | |
1063 | if (likely(!err2)) | |
1064 | goto lock_retry_remap; | |
1065 | if (err2 == -ENOMEM) | |
c49c3111 | 1066 | page_is_dirty = true; |
1da177e4 | 1067 | lcn = err2; |
9f993fe4 | 1068 | } else { |
1da177e4 | 1069 | err2 = -EIO; |
9f993fe4 AA |
1070 | if (!rl) |
1071 | up_read(&ni->runlist.lock); | |
1072 | } | |
1da177e4 LT |
1073 | /* Hard error. Abort writing this record. */ |
1074 | if (!err || err == -ENOMEM) | |
1075 | err = err2; | |
1076 | bh->b_blocknr = -1; | |
1077 | ntfs_error(vol->sb, "Cannot write ntfs record " | |
1078 | "0x%llx (inode 0x%lx, " | |
1079 | "attribute type 0x%x) because " | |
1080 | "its location on disk could " | |
1081 | "not be determined (error " | |
8907547d RD |
1082 | "code %lli).", |
1083 | (long long)block << | |
1da177e4 LT |
1084 | bh_size_bits >> |
1085 | vol->mft_record_size_bits, | |
1086 | ni->mft_no, ni->type, | |
1087 | (long long)lcn); | |
1088 | /* | |
1089 | * If this is not the first buffer, remove the | |
1090 | * buffers in this record from the list of | |
1091 | * buffers to write and clear their dirty bit | |
1092 | * if not error -ENOMEM. | |
1093 | */ | |
1094 | if (rec_start_bh != bh) { | |
1095 | while (bhs[--nr_bhs] != rec_start_bh) | |
1096 | ; | |
1097 | if (err2 != -ENOMEM) { | |
1098 | do { | |
1099 | clear_buffer_dirty( | |
1100 | rec_start_bh); | |
1101 | } while ((rec_start_bh = | |
1102 | rec_start_bh-> | |
1103 | b_this_page) != | |
1104 | bh); | |
1105 | } | |
1106 | } | |
1107 | continue; | |
1108 | } | |
1109 | } | |
1110 | BUG_ON(!buffer_uptodate(bh)); | |
1111 | BUG_ON(nr_bhs >= max_bhs); | |
1112 | bhs[nr_bhs++] = bh; | |
1113 | } while (block++, (bh = bh->b_this_page) != head); | |
1114 | if (unlikely(rl)) | |
1115 | up_read(&ni->runlist.lock); | |
1116 | /* If there were no dirty buffers, we are done. */ | |
1117 | if (!nr_bhs) | |
1118 | goto done; | |
1119 | /* Map the page so we can access its contents. */ | |
1120 | kaddr = kmap(page); | |
1121 | /* Clear the page uptodate flag whilst the mst fixups are applied. */ | |
1122 | BUG_ON(!PageUptodate(page)); | |
1123 | ClearPageUptodate(page); | |
1124 | for (i = 0; i < nr_bhs; i++) { | |
1125 | unsigned int ofs; | |
1126 | ||
1127 | /* Skip buffers which are not at the beginning of records. */ | |
1128 | if (i % bhs_per_rec) | |
1129 | continue; | |
1130 | tbh = bhs[i]; | |
1131 | ofs = bh_offset(tbh); | |
1132 | if (is_mft) { | |
1133 | ntfs_inode *tni; | |
1134 | unsigned long mft_no; | |
1135 | ||
1136 | /* Get the mft record number. */ | |
1137 | mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) | |
1138 | >> rec_size_bits; | |
1139 | /* Check whether to write this mft record. */ | |
1140 | tni = NULL; | |
1141 | if (!ntfs_may_write_mft_record(vol, mft_no, | |
1142 | (MFT_RECORD*)(kaddr + ofs), &tni)) { | |
1143 | /* | |
1144 | * The record should not be written. This | |
1145 | * means we need to redirty the page before | |
1146 | * returning. | |
1147 | */ | |
c49c3111 | 1148 | page_is_dirty = true; |
1da177e4 LT |
1149 | /* |
1150 | * Remove the buffers in this mft record from | |
1151 | * the list of buffers to write. | |
1152 | */ | |
1153 | do { | |
1154 | bhs[i] = NULL; | |
1155 | } while (++i % bhs_per_rec); | |
1156 | continue; | |
1157 | } | |
1158 | /* | |
1159 | * The record should be written. If a locked ntfs | |
1160 | * inode was returned, add it to the array of locked | |
1161 | * ntfs inodes. | |
1162 | */ | |
1163 | if (tni) | |
1164 | locked_nis[nr_locked_nis++] = tni; | |
1165 | } | |
1166 | /* Apply the mst protection fixups. */ | |
1167 | err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs), | |
1168 | rec_size); | |
1169 | if (unlikely(err2)) { | |
1170 | if (!err || err == -ENOMEM) | |
1171 | err = -EIO; | |
1172 | ntfs_error(vol->sb, "Failed to apply mst fixups " | |
1173 | "(inode 0x%lx, attribute type 0x%x, " | |
1174 | "page index 0x%lx, page offset 0x%x)!" | |
1175 | " Unmount and run chkdsk.", vi->i_ino, | |
1176 | ni->type, page->index, ofs); | |
1177 | /* | |
1178 | * Mark all the buffers in this record clean as we do | |
1179 | * not want to write corrupt data to disk. | |
1180 | */ | |
1181 | do { | |
1182 | clear_buffer_dirty(bhs[i]); | |
1183 | bhs[i] = NULL; | |
1184 | } while (++i % bhs_per_rec); | |
1185 | continue; | |
1186 | } | |
1187 | nr_recs++; | |
1188 | } | |
1189 | /* If no records are to be written out, we are done. */ | |
1190 | if (!nr_recs) | |
1191 | goto unm_done; | |
1192 | flush_dcache_page(page); | |
1193 | /* Lock buffers and start synchronous write i/o on them. */ | |
1194 | for (i = 0; i < nr_bhs; i++) { | |
1195 | tbh = bhs[i]; | |
1196 | if (!tbh) | |
1197 | continue; | |
1198 | if (unlikely(test_set_buffer_locked(tbh))) | |
1199 | BUG(); | |
1200 | /* The buffer dirty state is now irrelevant, just clean it. */ | |
1201 | clear_buffer_dirty(tbh); | |
1202 | BUG_ON(!buffer_uptodate(tbh)); | |
1203 | BUG_ON(!buffer_mapped(tbh)); | |
1204 | get_bh(tbh); | |
1205 | tbh->b_end_io = end_buffer_write_sync; | |
1206 | submit_bh(WRITE, tbh); | |
1207 | } | |
1208 | /* Synchronize the mft mirror now if not @sync. */ | |
1209 | if (is_mft && !sync) | |
1210 | goto do_mirror; | |
1211 | do_wait: | |
1212 | /* Wait on i/o completion of buffers. */ | |
1213 | for (i = 0; i < nr_bhs; i++) { | |
1214 | tbh = bhs[i]; | |
1215 | if (!tbh) | |
1216 | continue; | |
1217 | wait_on_buffer(tbh); | |
1218 | if (unlikely(!buffer_uptodate(tbh))) { | |
1219 | ntfs_error(vol->sb, "I/O error while writing ntfs " | |
1220 | "record buffer (inode 0x%lx, " | |
1221 | "attribute type 0x%x, page index " | |
1222 | "0x%lx, page offset 0x%lx)! Unmount " | |
1223 | "and run chkdsk.", vi->i_ino, ni->type, | |
1224 | page->index, bh_offset(tbh)); | |
1225 | if (!err || err == -ENOMEM) | |
1226 | err = -EIO; | |
1227 | /* | |
1228 | * Set the buffer uptodate so the page and buffer | |
1229 | * states do not become out of sync. | |
1230 | */ | |
1231 | set_buffer_uptodate(tbh); | |
1232 | } | |
1233 | } | |
1234 | /* If @sync, now synchronize the mft mirror. */ | |
1235 | if (is_mft && sync) { | |
1236 | do_mirror: | |
1237 | for (i = 0; i < nr_bhs; i++) { | |
1238 | unsigned long mft_no; | |
1239 | unsigned int ofs; | |
1240 | ||
1241 | /* | |
1242 | * Skip buffers which are not at the beginning of | |
1243 | * records. | |
1244 | */ | |
1245 | if (i % bhs_per_rec) | |
1246 | continue; | |
1247 | tbh = bhs[i]; | |
1248 | /* Skip removed buffers (and hence records). */ | |
1249 | if (!tbh) | |
1250 | continue; | |
1251 | ofs = bh_offset(tbh); | |
1252 | /* Get the mft record number. */ | |
1253 | mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) | |
1254 | >> rec_size_bits; | |
1255 | if (mft_no < vol->mftmirr_size) | |
1256 | ntfs_sync_mft_mirror(vol, mft_no, | |
1257 | (MFT_RECORD*)(kaddr + ofs), | |
1258 | sync); | |
1259 | } | |
1260 | if (!sync) | |
1261 | goto do_wait; | |
1262 | } | |
1263 | /* Remove the mst protection fixups again. */ | |
1264 | for (i = 0; i < nr_bhs; i++) { | |
1265 | if (!(i % bhs_per_rec)) { | |
1266 | tbh = bhs[i]; | |
1267 | if (!tbh) | |
1268 | continue; | |
1269 | post_write_mst_fixup((NTFS_RECORD*)(kaddr + | |
1270 | bh_offset(tbh))); | |
1271 | } | |
1272 | } | |
1273 | flush_dcache_page(page); | |
1274 | unm_done: | |
1275 | /* Unlock any locked inodes. */ | |
1276 | while (nr_locked_nis-- > 0) { | |
1277 | ntfs_inode *tni, *base_tni; | |
1278 | ||
1279 | tni = locked_nis[nr_locked_nis]; | |
1280 | /* Get the base inode. */ | |
4e5e529a | 1281 | mutex_lock(&tni->extent_lock); |
1da177e4 LT |
1282 | if (tni->nr_extents >= 0) |
1283 | base_tni = tni; | |
1284 | else { | |
1285 | base_tni = tni->ext.base_ntfs_ino; | |
1286 | BUG_ON(!base_tni); | |
1287 | } | |
4e5e529a | 1288 | mutex_unlock(&tni->extent_lock); |
1da177e4 LT |
1289 | ntfs_debug("Unlocking %s inode 0x%lx.", |
1290 | tni == base_tni ? "base" : "extent", | |
1291 | tni->mft_no); | |
4e5e529a | 1292 | mutex_unlock(&tni->mrec_lock); |
1da177e4 LT |
1293 | atomic_dec(&tni->count); |
1294 | iput(VFS_I(base_tni)); | |
1295 | } | |
1296 | SetPageUptodate(page); | |
1297 | kunmap(page); | |
1298 | done: | |
1299 | if (unlikely(err && err != -ENOMEM)) { | |
1300 | /* | |
1301 | * Set page error if there is only one ntfs record in the page. | |
1302 | * Otherwise we would lose per-record granularity. | |
1303 | */ | |
1304 | if (ni->itype.index.block_size == PAGE_CACHE_SIZE) | |
1305 | SetPageError(page); | |
1306 | NVolSetErrors(vol); | |
1307 | } | |
1308 | if (page_is_dirty) { | |
1309 | ntfs_debug("Page still contains one or more dirty ntfs " | |
1310 | "records. Redirtying the page starting at " | |
1311 | "record 0x%lx.", page->index << | |
1312 | (PAGE_CACHE_SHIFT - rec_size_bits)); | |
1313 | redirty_page_for_writepage(wbc, page); | |
1314 | unlock_page(page); | |
1315 | } else { | |
1316 | /* | |
1317 | * Keep the VM happy. This must be done otherwise the | |
1318 | * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though | |
1319 | * the page is clean. | |
1320 | */ | |
1321 | BUG_ON(PageWriteback(page)); | |
1322 | set_page_writeback(page); | |
1323 | unlock_page(page); | |
1324 | end_page_writeback(page); | |
1325 | } | |
1326 | if (likely(!err)) | |
1327 | ntfs_debug("Done."); | |
1328 | return err; | |
1329 | } | |
1330 | ||
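A condensed sketch of the per-record round-trip described in the comment block above ntfs_write_mst_block(): apply the mst (update sequence) fixups, write the record's buffers while the page stays locked, then undo the fixups so the in-memory copy becomes readable again. The helper name is illustrative and the buffer submission step is elided.

```c
/* Hypothetical helper, not part of the driver. */
static int example_write_one_record(NTFS_RECORD *rec, u32 rec_size)
{
	int err = pre_write_mst_fixup(rec, rec_size);

	if (unlikely(err))
		return err;	/* Record is corrupt; do not write it out. */
	/*
	 * ... lock the record's buffer heads, submit_bh(WRITE, ...) them,
	 * and wait_on_buffer() if the write must be synchronous ...
	 */
	post_write_mst_fixup(rec);	/* Make the in-memory record usable. */
	return 0;
}
```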
1331 | /** | |
1332 | * ntfs_writepage - write a @page to the backing store | |
1333 | * @page: page cache page to write out | |
1334 | * @wbc: writeback control structure | |
1335 | * | |
1336 | * This is called from the VM when it wants to have a dirty ntfs page cache | |
1337 | * page cleaned. The VM has already locked the page and marked it clean. | |
1338 | * | |
1339 | * For non-resident attributes, ntfs_writepage() writes the @page by calling | |
1340 | * the ntfs version of the generic block_write_full_page() function, | |
1341 | * ntfs_write_block(), which in turn if necessary creates and writes the | |
1342 | * buffers associated with the page asynchronously. | |
1343 | * | |
1344 | * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying | |
1345 | * the data to the mft record (which at this stage is most likely in memory). | |
1346 | * The mft record is then marked dirty and written out asynchronously via the | |
1347 | * vfs inode dirty code path for the inode the mft record belongs to or via the | |
1348 | * vm page dirty code path for the page the mft record is in. | |
1349 | * | |
1350 | * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page(). | |
1351 | * | |
1352 | * Return 0 on success and -errno on error. | |
1353 | */ | |
1354 | static int ntfs_writepage(struct page *page, struct writeback_control *wbc) | |
1355 | { | |
1356 | loff_t i_size; | |
149f0c52 AA |
1357 | struct inode *vi = page->mapping->host; |
1358 | ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi); | |
1da177e4 | 1359 | char *kaddr; |
149f0c52 AA |
1360 | ntfs_attr_search_ctx *ctx = NULL; |
1361 | MFT_RECORD *m = NULL; | |
1da177e4 LT |
1362 | u32 attr_len; |
1363 | int err; | |
1364 | ||
905685f6 | 1365 | retry_writepage: |
1da177e4 | 1366 | BUG_ON(!PageLocked(page)); |
1da177e4 | 1367 | i_size = i_size_read(vi); |
1da177e4 LT |
1368 | /* Is the page fully outside i_size? (truncate in progress) */ |
1369 | if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> | |
1370 | PAGE_CACHE_SHIFT)) { | |
1371 | /* | |
1372 | * The page may have dirty, unmapped buffers. Make them | |
1373 | * freeable here, so the page does not leak. | |
1374 | */ | |
1375 | block_invalidatepage(page, 0); | |
1376 | unlock_page(page); | |
1377 | ntfs_debug("Write outside i_size - truncated?"); | |
1378 | return 0; | |
1379 | } | |
bd45fdd2 AA |
1380 | /* |
1381 | * Only $DATA attributes can be encrypted and only unnamed $DATA | |
1382 | * attributes can be compressed. Index root can have the flags set, but | |
1383 | * there this only means that newly created files are to be compressed or | |
4e64c886 AA |
1384 | * encrypted, not that the attribute itself is. Note we need to check for | |
1385 | * AT_INDEX_ALLOCATION since this is the type of both directory and | |
1386 | * index inodes. | |
bd45fdd2 | 1387 | */ |
4e64c886 | 1388 | if (ni->type != AT_INDEX_ALLOCATION) { |
bd45fdd2 AA |
1389 | /* If file is encrypted, deny access, just like NT4. */ |
1390 | if (NInoEncrypted(ni)) { | |
1391 | unlock_page(page); | |
1392 | BUG_ON(ni->type != AT_DATA); | |
7d0ffdb2 | 1393 | ntfs_debug("Denying write access to encrypted file."); |
bd45fdd2 AA |
1394 | return -EACCES; |
1395 | } | |
1396 | /* Compressed data streams are handled in compress.c. */ | |
1397 | if (NInoNonResident(ni) && NInoCompressed(ni)) { | |
1398 | BUG_ON(ni->type != AT_DATA); | |
1399 | BUG_ON(ni->name_len); | |
1400 | // TODO: Implement and replace this with | |
1401 | // return ntfs_write_compressed_block(page); | |
1402 | unlock_page(page); | |
1403 | ntfs_error(vi->i_sb, "Writing to compressed files is " | |
1404 | "not supported yet. Sorry."); | |
1405 | return -EOPNOTSUPP; | |
1406 | } | |
1407 | // TODO: Implement and remove this check. | |
1408 | if (NInoNonResident(ni) && NInoSparse(ni)) { | |
1409 | unlock_page(page); | |
1410 | ntfs_error(vi->i_sb, "Writing to sparse files is not " | |
1411 | "supported yet. Sorry."); | |
1412 | return -EOPNOTSUPP; | |
1413 | } | |
1414 | } | |
1da177e4 LT |
1415 | /* NInoNonResident() == NInoIndexAllocPresent() */ |
1416 | if (NInoNonResident(ni)) { | |
1da177e4 LT |
1417 | /* We have to zero every time due to mmap-at-end-of-file. */ |
1418 | if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { | |
1419 | /* The page straddles i_size. */ | |
1420 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; | |
1421 | kaddr = kmap_atomic(page, KM_USER0); | |
1422 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); | |
1da177e4 | 1423 | kunmap_atomic(kaddr, KM_USER0); |
f6098cf4 | 1424 | flush_dcache_page(page); |
1da177e4 LT |
1425 | } |
1426 | /* Handle mst protected attributes. */ | |
1427 | if (NInoMstProtected(ni)) | |
1428 | return ntfs_write_mst_block(page, wbc); | |
bd45fdd2 | 1429 | /* Normal, non-resident data stream. */ |
1da177e4 LT |
1430 | return ntfs_write_block(page, wbc); |
1431 | } | |
1432 | /* | |
bd45fdd2 AA |
1433 | * The attribute is resident, implying it is not compressed, encrypted, or | |
1434 | * mst protected. This also means the attribute is smaller than an mft | |
1435 | * record and hence smaller than a page, so we can simply return an error | |
1436 | * on any page with an index above 0. Note the attribute can actually be | |
1437 | * marked compressed, but if it is resident the actual data is not | |
1438 | * compressed, so we are ok to ignore the compressed flag here. | |
1da177e4 LT |
1439 | */ |
1440 | BUG_ON(page_has_buffers(page)); | |
1441 | BUG_ON(!PageUptodate(page)); | |
1442 | if (unlikely(page->index > 0)) { | |
1443 | ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. " | |
1444 | "Aborting write.", page->index); | |
1445 | BUG_ON(PageWriteback(page)); | |
1446 | set_page_writeback(page); | |
1447 | unlock_page(page); | |
1448 | end_page_writeback(page); | |
1449 | return -EIO; | |
1450 | } | |
1451 | if (!NInoAttr(ni)) | |
1452 | base_ni = ni; | |
1453 | else | |
1454 | base_ni = ni->ext.base_ntfs_ino; | |
1455 | /* Map, pin, and lock the mft record. */ | |
1456 | m = map_mft_record(base_ni); | |
1457 | if (IS_ERR(m)) { | |
1458 | err = PTR_ERR(m); | |
1459 | m = NULL; | |
1460 | ctx = NULL; | |
1461 | goto err_out; | |
1462 | } | |
905685f6 AA |
1463 | /* |
1464 | * If a parallel write made the attribute non-resident, drop the mft | |
1465 | * record and retry the writepage. | |
1466 | */ | |
1467 | if (unlikely(NInoNonResident(ni))) { | |
1468 | unmap_mft_record(base_ni); | |
1469 | goto retry_writepage; | |
1470 | } | |
1da177e4 LT |
1471 | ctx = ntfs_attr_get_search_ctx(base_ni, m); |
1472 | if (unlikely(!ctx)) { | |
1473 | err = -ENOMEM; | |
1474 | goto err_out; | |
1475 | } | |
1476 | err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, | |
1477 | CASE_SENSITIVE, 0, NULL, 0, ctx); | |
1478 | if (unlikely(err)) | |
1479 | goto err_out; | |
1480 | /* | |
1481 | * Keep the VM happy. This must be done, otherwise the radix-tree tag | |
1482 | * PAGECACHE_TAG_DIRTY remains set even though the page is clean. | |
1483 | */ | |
1484 | BUG_ON(PageWriteback(page)); | |
1485 | set_page_writeback(page); | |
1486 | unlock_page(page); | |
1da177e4 | 1487 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); |
07a4e2da | 1488 | i_size = i_size_read(vi); |
1da177e4 | 1489 | if (unlikely(attr_len > i_size)) { |
f6098cf4 | 1490 | /* Race with shrinking truncate or a failed truncate. */ |
1da177e4 | 1491 | attr_len = i_size; |
f6098cf4 AA |
1492 | /* |
1493 | * If the truncate failed, fix it up now. If a concurrent truncate is | |
1494 | * in progress, we do its job for it, so it does not have to do anything. | |
1495 | */ | |
1496 | err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, | |
1497 | attr_len); | |
1498 | /* Shrinking cannot fail. */ | |
1499 | BUG_ON(err); | |
1da177e4 | 1500 | } |
f40661be | 1501 | kaddr = kmap_atomic(page, KM_USER0); |
1da177e4 LT |
1502 | /* Copy the data from the page to the mft record. */ |
1503 | memcpy((u8*)ctx->attr + | |
1504 | le16_to_cpu(ctx->attr->data.resident.value_offset), | |
1505 | kaddr, attr_len); | |
1da177e4 LT |
1506 | /* Zero out of bounds area in the page cache page. */ |
1507 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); | |
1da177e4 | 1508 | kunmap_atomic(kaddr, KM_USER0); |
f6098cf4 | 1509 | flush_dcache_page(page); |
7d0ffdb2 | 1510 | flush_dcache_mft_record_page(ctx->ntfs_ino); |
f6098cf4 | 1511 | /* We are done with the page. */ |
1da177e4 | 1512 | end_page_writeback(page); |
f6098cf4 | 1513 | /* Finally, mark the mft record dirty, so it gets written back. */ |
1da177e4 LT |
1514 | mark_mft_record_dirty(ctx->ntfs_ino); |
1515 | ntfs_attr_put_search_ctx(ctx); | |
1516 | unmap_mft_record(base_ni); | |
1517 | return 0; | |
1518 | err_out: | |
1519 | if (err == -ENOMEM) { | |
1520 | ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying " | |
1521 | "page so we try again later."); | |
1522 | /* | |
1523 | * Put the page back on mapping->dirty_pages, but leave its | |
1524 | * buffers' dirty state as-is. | |
1525 | */ | |
1526 | redirty_page_for_writepage(wbc, page); | |
1527 | err = 0; | |
1528 | } else { | |
1529 | ntfs_error(vi->i_sb, "Resident attribute write failed with " | |
149f0c52 | 1530 | "error %i.", err); |
1da177e4 | 1531 | SetPageError(page); |
149f0c52 | 1532 | NVolSetErrors(ni->vol); |
1da177e4 LT |
1533 | } |
1534 | unlock_page(page); | |
1535 | if (ctx) | |
1536 | ntfs_attr_put_search_ctx(ctx); | |
1537 | if (m) | |
1538 | unmap_mft_record(base_ni); | |
1539 | return err; | |
1540 | } | |
1541 | ||
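As a minimal illustration of the i_size arithmetic used by the non-resident path above (not part of the source; the helper name is made up and 4096 byte pages are assumed for the worked numbers):

	static inline unsigned int example_valid_bytes_in_straddling_page(
			const loff_t i_size)
	{
		/*
		 * Illustration only: a page straddles i_size when its index is
		 * >= i_size >> PAGE_CACHE_SHIFT.  E.g. with i_size == 10000 and
		 * 4096 byte pages, the straddling page has index 2, its first
		 * 10000 & ~PAGE_CACHE_MASK == 1808 bytes hold valid data, and
		 * ntfs_writepage() zeroes the remaining 4096 - 1808 == 2288
		 * bytes before the page is written out.
		 */
		return i_size & ~PAGE_CACHE_MASK;
	}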
1da177e4 LT |
1542 | #endif /* NTFS_RW */ |
1543 | ||
1544 | /** | |
1545 | * ntfs_aops - general address space operations for inodes and attributes | |
1546 | */ | |
f5e54d6e | 1547 | const struct address_space_operations ntfs_aops = { |
1da177e4 LT |
1548 | .readpage = ntfs_readpage, /* Fill page with data. */ |
1549 | .sync_page = block_sync_page, /* Currently, just unplugs the | |
1550 | disk request queue. */ | |
1551 | #ifdef NTFS_RW | |
1552 | .writepage = ntfs_writepage, /* Write dirty page to disk. */ | |
1da177e4 | 1553 | #endif /* NTFS_RW */ |
78264bd9 AA |
1554 | .migratepage = buffer_migrate_page, /* Move a page cache page from |
1555 | one physical page to | |
1556 | another. */ | |
1da177e4 LT |
1557 | }; |
1558 | ||
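A minimal sketch of how ntfs_aops is exercised from the page cache side, assuming a read through the generic read_mapping_page() helper (the wrapper below is hypothetical and error handling is elided; the driver wraps a similar call in its own ntfs_map_page() helper):

	static struct page *example_read_attr_page(struct inode *vi, pgoff_t index)
	{
		/*
		 * Hypothetical wrapper: read_mapping_page() fills the page
		 * cache page through the mapping's ->readpage, i.e.
		 * ntfs_readpage() above; on failure an ERR_PTR() is returned
		 * instead of a page.
		 */
		return read_mapping_page(vi->i_mapping, index, NULL);
	}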
1559 | /** | |
1560 | * ntfs_mst_aops - general address space operations for mst protected inodes | |
1561 | * and attributes | |
1562 | */ | |
f5e54d6e | 1563 | const struct address_space_operations ntfs_mst_aops = { |
1da177e4 LT |
1564 | .readpage = ntfs_readpage, /* Fill page with data. */ |
1565 | .sync_page = block_sync_page, /* Currently, just unplugs the | |
1566 | disk request queue. */ | |
1567 | #ifdef NTFS_RW | |
1568 | .writepage = ntfs_writepage, /* Write dirty page to disk. */ | |
1569 | .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty | |
1570 | without touching the buffers | |
1571 | belonging to the page. */ | |
1572 | #endif /* NTFS_RW */ | |
78264bd9 AA |
1573 | .migratepage = buffer_migrate_page, /* Move a page cache page from |
1574 | one physical page to | |
1575 | another. */ | |
1da177e4 LT |
1576 | }; |
1577 | ||
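A minimal sketch of how an inode might be wired to one of the two operation tables above, assuming the usual split between mst protected attributes and everything else (the helper is hypothetical; the real assignments live in the driver's inode setup code):

	static void example_select_aops(struct inode *vi, ntfs_inode *ni)
	{
		/*
		 * Hypothetical helper: mst protected attributes (e.g. the
		 * index allocation attribute of directories) use
		 * ntfs_mst_aops, which dirties whole pages without touching
		 * their buffers; everything else uses the plain ntfs_aops.
		 */
		if (NInoMstProtected(ni))
			vi->i_mapping->a_ops = &ntfs_mst_aops;
		else
			vi->i_mapping->a_ops = &ntfs_aops;
	}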
1578 | #ifdef NTFS_RW | |
1579 | ||
1580 | /** | |
1581 | * mark_ntfs_record_dirty - mark an ntfs record dirty | |
1582 | * @page: page containing the ntfs record to mark dirty | |
1583 | * @ofs: byte offset within @page at which the ntfs record begins | |
1584 | * | |
1585 | * Set the buffers and the page in which the ntfs record is located dirty. | |
1586 | * | |
1587 | * The latter also marks the vfs inode the ntfs record belongs to dirty | |
1588 | * (I_DIRTY_PAGES only). | |
1589 | * | |
1590 | * If the page does not have buffers, we create them and set them uptodate. | |
1591 | * The page may not be locked, which is why we need to handle the buffers under | |
1592 | * the mapping->private_lock. Once the buffers are marked dirty, we no longer | |
1593 | * need the lock since try_to_free_buffers() does not free dirty buffers. | |
1594 | */ | |
1595 | void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { | |
1596 | struct address_space *mapping = page->mapping; | |
1597 | ntfs_inode *ni = NTFS_I(mapping->host); | |
1598 | struct buffer_head *bh, *head, *buffers_to_free = NULL; | |
1599 | unsigned int end, bh_size, bh_ofs; | |
1600 | ||
1601 | BUG_ON(!PageUptodate(page)); | |
1602 | end = ofs + ni->itype.index.block_size; | |
78af34f0 | 1603 | bh_size = VFS_I(ni)->i_sb->s_blocksize; |
1da177e4 LT |
1604 | spin_lock(&mapping->private_lock); |
1605 | if (unlikely(!page_has_buffers(page))) { | |
1606 | spin_unlock(&mapping->private_lock); | |
1607 | bh = head = alloc_page_buffers(page, bh_size, 1); | |
1608 | spin_lock(&mapping->private_lock); | |
1609 | if (likely(!page_has_buffers(page))) { | |
1610 | struct buffer_head *tail; | |
1611 | ||
1612 | do { | |
1613 | set_buffer_uptodate(bh); | |
1614 | tail = bh; | |
1615 | bh = bh->b_this_page; | |
1616 | } while (bh); | |
1617 | tail->b_this_page = head; | |
1618 | attach_page_buffers(page, head); | |
1619 | } else | |
1620 | buffers_to_free = bh; | |
1621 | } | |
1622 | bh = head = page_buffers(page); | |
a01ac532 | 1623 | BUG_ON(!bh); |
1da177e4 LT |
1624 | do { |
1625 | bh_ofs = bh_offset(bh); | |
1626 | if (bh_ofs + bh_size <= ofs) | |
1627 | continue; | |
1628 | if (unlikely(bh_ofs >= end)) | |
1629 | break; | |
1630 | set_buffer_dirty(bh); | |
1631 | } while ((bh = bh->b_this_page) != head); | |
1632 | spin_unlock(&mapping->private_lock); | |
1633 | __set_page_dirty_nobuffers(page); | |
1634 | if (unlikely(buffers_to_free)) { | |
1635 | do { | |
1636 | bh = buffers_to_free->b_this_page; | |
1637 | free_buffer_head(buffers_to_free); | |
1638 | buffers_to_free = bh; | |
1639 | } while (buffers_to_free); | |
1640 | } | |
1641 | } | |
1642 | ||
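A minimal usage sketch for mark_ntfs_record_dirty(), assuming ntfs records of ni->itype.index.block_size bytes packed back to back within the page (the helper name and the caller's context are assumptions, not driver code):

	static void example_dirty_nth_record(struct page *page, ntfs_inode *ni,
			const unsigned int nr)
	{
		/*
		 * Hypothetical caller: compute the byte offset of the nr'th
		 * record within the page, then dirty the buffers covering
		 * that record as well as the page itself so that write-back
		 * picks it up.
		 */
		mark_ntfs_record_dirty(page, nr << ni->itype.index.block_size_bits);
	}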
1643 | #endif /* NTFS_RW */ |