/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"


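/*
 * XFS_WRITEIO_ALIGN() rounds a byte offset down to the preferred write
 * I/O size: for example, with mp->m_writeio_log = 16 (a 64k write
 * size), an offset of 0x12345 aligns down to 0x10000.
 */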
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

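	/*
	 * Round the reservation out to whole extent size hint multiples.
	 * For example, with extsz = 16, offset_fsb = 10 and count_fsb = 20
	 * the request spans blocks 10-29, so the reservation is rounded
	 * out to blocks 0-31 and resaligned becomes 32.
	 */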
	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction. Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (ISUNWRITTEN(imap)) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize. We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size. Hence we need to
 * look up the extent that ends at the current write offset and use the result
 * to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This ensures
 * that for large files the preallocation size always extends to MAXEXTLEN
 * rather than falling short due to things like stripe unit/width alignment of
 * real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t	start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size is
	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}
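	/*
	 * Example: once the remaining quota headroom drops below the 5%
	 * low-space threshold the prealloc will be quartered (shift = 2);
	 * crossing the 3% and 1% thresholds shrinks it by a further factor
	 * of four each, down to 1/64 of the requested size.
	 */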

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}
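	/*
	 * Example: the same scheme applied filesystem-wide. Below 5% free
	 * blocks the prealloc is cut to a quarter (shift = 2), and each
	 * further threshold crossed (4%, 3%, 2%, 1%) halves it again, down
	 * to 1/64 when less than 1% of the filesystem is free.
	 */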

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}

int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return error;

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case -ENOSPC:
	case -EDQUOT:
		break;
	default:
		return error;
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return error ? error : -ENOSPC;
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file. Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in. The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);

			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look at for overlap with the
			 * desired range and abort as soon as we find it. Also,
			 * given that we only return a single map, having one
			 * beyond what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0, &first_block,
						nres, imap, &nimaps,
						&free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	xfs_bmap_free_t	free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits. We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here, as we might be asked to write out the same inode that
		 * we complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go. We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}

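/* Do we still need to allocate blocks to back a write to this range? */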
static inline bool imap_needs_alloc(struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK;
}

static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE);
	if (error) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(&imap, nimaps)) {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does. This is a completely arbitrary
		 * number pulled out of thin air as a best guess for initial
		 * testing.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);
		if (xfs_get_extsz_hint(ip)) {
			/*
			 * xfs_iomap_write_direct() expects the shared lock. It
			 * is unlocked on return.
			 */
			xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
			error = xfs_iomap_write_direct(ip, offset, length, &imap,
					nimaps);
		} else {
			error = xfs_iomap_write_delay(ip, offset, length, &imap);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		if (error)
			return error;

		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	} else if (nimaps) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	} else {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		trace_xfs_iomap_not_found(ip, offset, length, 0, &imap);
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
		iomap->offset = offset;
		iomap->length = length;
	}

	return 0;
}

static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim back delalloc blocks if we didn't manage to write the whole
	 * range reserved.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if (start_fsb < end_fsb) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						      end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written);
	return 0;
}

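/*
 * These ops are handed to the generic iomap helpers (e.g.
 * iomap_file_buffered_write(), iomap_zero_range()), which call
 * ->iomap_begin to map each range of an operation and ->iomap_end
 * to clean up after it.
 */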
struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};