[XFS] kill superfluous buffer locking (2nd attempt)
fs/xfs/xfs_inode.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_imap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_acl.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
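/*
 * Allocation zones for the in-core inode fork, inode, and inode
 * cluster structures.
 */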
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
kmem_zone_t *xfs_icluster_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif
/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
STATIC int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset)
{
	int		di_ok;
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	xfs_dinode_t	*dip;

	/*
	 * Call the space management code to find the location of the
	 * inode on disk.
	 */
	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
	if (error != 0) {
		cmn_err(CE_WARN,
	"xfs_inotobp: xfs_imap() returned an "
	"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}

	/*
	 * If the inode number maps to a block outside the bounds of the
	 * file system then return NULL rather than calling read_buf
	 * and panicking when we get an error from the driver.
	 */
	if ((imap.im_blkno + imap.im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		cmn_err(CE_WARN,
	"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
	"of the file system %s.  Returning EINVAL.",
			(unsigned long long)imap.im_blkno,
			imap.im_len, mp->m_fsname);
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
	if (error) {
		cmn_err(CE_WARN,
	"xfs_inotobp: xfs_trans_read_buf() returned an "
	"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
	di_ok =
		be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
		XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
			XFS_RANDOM_ITOBP_INOTOBP))) {
		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
		xfs_trans_brelse(tp, bp);
		cmn_err(CE_WARN,
	"xfs_inotobp: XFS_TEST_ERROR() returned an "
	"error on %s.  Returning EFSCORRUPTED.", mp->m_fsname);
		return XFS_ERROR(EFSCORRUPTED);
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}
/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * If the inode is new and has not yet been initialized, use xfs_imap()
 * to determine the size and location of the buffer to read from disk.
 * If the inode has already been mapped to its buffer and read in once,
 * then use the mapping information stored in the inode rather than
 * calling xfs_imap().  This allows us to avoid the overhead of looking
 * at the inode btree for small block file systems (see xfs_dilocate()).
 * We can tell whether the inode has been mapped in before by comparing
 * its disk block address to 0.  Only uninitialized inodes will have
 * 0 for the disk block address.
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	int		i;
	int		ni;

	if (ip->i_blkno == (xfs_daddr_t)0) {
		/*
		 * Call the space management code to find the location of the
		 * inode on disk.
		 */
		imap.im_blkno = bno;
		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
					XFS_IMAP_LOOKUP | imap_flags)))
			return error;

		/*
		 * If the inode number maps to a block outside the bounds
		 * of the file system then return NULL rather than calling
		 * read_buf and panicking when we get an error from the
		 * driver.
		 */
		if ((imap.im_blkno + imap.im_len) >
		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
					"(imap.im_blkno (0x%llx) "
					"+ imap.im_len (0x%llx)) > "
					" XFS_FSB_TO_BB(mp, "
					"mp->m_sb.sb_dblocks) (0x%llx)",
					(unsigned long long) imap.im_blkno,
					(unsigned long long) imap.im_len,
					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
#endif /* DEBUG */
			return XFS_ERROR(EINVAL);
		}

		/*
		 * Fill in the fields in the inode that will be used to
		 * map the inode to its buffer from now on.
		 */
		ip->i_blkno = imap.im_blkno;
		ip->i_len = imap.im_len;
		ip->i_boffset = imap.im_boffset;
	} else {
		/*
		 * We've already mapped the inode once, so just use the
		 * mapping that we saved the first time.
		 */
		imap.im_blkno = ip->i_blkno;
		imap.im_len = ip->i_len;
		imap.im_boffset = ip->i_boffset;
	}
	ASSERT(bno == 0 || bno == imap.im_blkno);

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
	if (error) {
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
				"xfs_trans_read_buf() returned error %d, "
				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
				error, (unsigned long long) imap.im_blkno,
				(unsigned long long) imap.im_len);
#endif /* DEBUG */
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 * No validation is done here in userspace (xfs_repair).
	 */
#if !defined(__KERNEL__)
	ni = 0;
#elif defined(DEBUG)
	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (imap_flags & XFS_IMAP_BULKSTAT) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
#ifdef DEBUG
			cmn_err(CE_ALERT,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
				XFS_BUFTARG_NAME(mp->m_ddev_targp),
				(unsigned long long)imap.im_blkno, i,
				be16_to_cpu(dip->di_core.di_magic));
#endif
			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
					     mp, dip);
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	return 0;
}
/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(be32_to_cpu(dip->di_core.di_nextents) +
		     be16_to_cpu(dip->di_core.di_anextents) >
		     be64_to_cpu(dip->di_core.di_nblocks))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_core.di_nextents) +
			      be16_to_cpu(dip->di_core.di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_core.di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_core.di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					      ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_core.di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_core.di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_core.di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);
		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}
/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu "
			"(bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}
/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
			ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}
/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);

	/*
	 * blow out if -- fork has less extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
		ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}
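/*
 * Convert the on-disk inode core (big-endian) into the
 * native-endian in-core format.
 */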
void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_core_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid = be16_to_cpu(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}
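/*
 * Convert the in-core inode back into the on-disk (big-endian)
 * format.
 */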
void
xfs_dinode_to_disk(
	xfs_dinode_core_t	*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid = cpu_to_be16(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}
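/*
 * Map the on-disk XFS_DIFLAG_* inode flags to the user-visible
 * XFS_XFLAG_* values.
 */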
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}
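/*
 * Return the XFS_XFLAG_* values for an inode: xfs_ip2xflags() works
 * from the in-core copy, xfs_dic2xflags() from a raw on-disk inode
 * core.  Both also report whether an attribute fork is present.
 */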
uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
		(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_core_t	*dic)
{
	return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
		(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
}
/*
 * Given a mount structure and an inode number, return a pointer
 * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
 * already has them (it will not if the inode has no links).
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	int		error;

	ASSERT(xfs_inode_zone != NULL);

	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
	ip->i_ino = ino;
	ip->i_mount = mp;
	atomic_set(&ip->i_iocount, 0);
	spin_lock_init(&ip->i_flags_lock);

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 * If the inode number refers to a block outside the file system
	 * then xfs_itobp() will return NULL.  In this case we should
	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
	 * know that this is a new incore inode.
	 */
	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
	if (error) {
		kmem_zone_free(xfs_inode_zone, ip);
		return error;
	}

	/*
	 * Initialize inode's trace buffers.
	 * Do this before xfs_iformat in case it adds entries.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
#endif

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) {
		kmem_zone_free(xfs_inode_zone, ip);
		xfs_trans_brelse(tp, bp);
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
				"dip->di_core.di_magic (0x%x) != "
				"XFS_DINODE_MAGIC (0x%x)",
				be16_to_cpu(dip->di_core.di_magic),
				XFS_DINODE_MAGIC);
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_core.di_mode) {
		xfs_dinode_from_disk(&ip->i_d, &dip->di_core);
		error = xfs_iformat(ip, dip);
		if (error)  {
			kmem_zone_free(xfs_inode_zone, ip);
			xfs_trans_brelse(tp, bp);
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
					"xfs_iformat() returned error %d",
					error);
#endif /* DEBUG */
			return error;
		}
	} else {
		ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic);
		ip->i_d.di_version = dip->di_core.di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_core.di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_core.di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	INIT_LIST_HEAD(&ip->i_reclaim);

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		ip->i_d.di_projid = 0;
	}

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	XFS_BUF_SET_REF(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
	xfs_trans_brelse(tp, bp);
	*ipp = ip;
	return 0;
}
/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;
	size_t		size;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	size = nextents * sizeof(xfs_bmbt_rec_t);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_lastex = NULLEXTNUM;
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation.  Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	cred_t		*cr,
	xfs_prid_t	prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp;
	uint		flags;
	int		error;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error != 0) {
		return error;
	}
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_trans_iget(tp->t_mountp, tp, ino,
				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error != 0) {
		return error;
	}
	ASSERT(ip != NULL);

	vp = XFS_ITOV(ip);
	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid(cr);
	ip->i_d.di_gid = current_fsgid(cr);
	ip->i_d.di_projid = prid;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_version = XFS_DINODE_VERSION_2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);
	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		if (pip && xfs_inode_is_filestream(pip)) {
			error = xfs_filestream_associate(pip, ip);
			if (error < 0)
				return -error;
			if (!error)
				xfs_iflags_set(ip, XFS_IFILESTREAM);
		}
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_initialize_vnode(tp->t_mountp, vp, ip);

	*ipp = ip;
	return 0;
}
/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (XFS_IS_REALTIME_INODE(ip))
		return;

	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */
/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}
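/*
 * Record the arguments of a truncate call in the inode's rw trace
 * buffer; compiled away unless XFS_RW_TRACE is defined.
 */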
#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
	int		tag,
	xfs_inode_t	*ip,
	int		flag,
	xfs_fsize_t	new_size,
	xfs_off_t	toss_start,
	xfs_off_t	toss_finish)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void*)((long)tag),
		     (void*)ip,
		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
		     (void*)((long)flag),
		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(new_size & 0xffffffff),
		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_start & 0xffffffff),
		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_finish & 0xffffffff),
		     (void*)(unsigned long)current_cpu(),
		     (void*)(unsigned long)current_pid(),
		     (void*)NULL,
		     (void*)NULL,
		     (void*)NULL);
}
#else
#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif
/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate.  This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock.  Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	bhv_vnode_t	*vp;
	int		error = 0;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;
	vp = XFS_ITOV(ip);

	/* wait for the completion of any pending DIOs */
	if (new_size < ip->i_size)
		vn_iowait(ip);

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size.  We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_pages or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * out there.
		 */
		return 0;
	}
	last_byte = xfs_file_last_byte(ip);
	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
			 last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			xfs_tosspages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		} else {
			error = xfs_flushinval_pages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(vp) == 0);
	}
#endif
	return error;
}
1518/*
1519 * Shrink the file to the given new_size. The new
1520 * size must be smaller than the current size.
1521 * This will free up the underlying blocks
1522 * in the removed range after a call to xfs_itruncate_start()
1523 * or xfs_atruncate_start().
1524 *
1525 * The transaction passed to this routine must have made
1526 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
1527 * This routine may commit the given transaction and
1528 * start new ones, so make sure everything involved in
1529 * the transaction is tidy before calling here.
1530 * Some transaction will be returned to the caller to be
1531 * committed. The incoming transaction must already include
1532 * the inode, and both inode locks must be held exclusively.
1533 * The inode must also be "held" within the transaction. On
1534 * return the inode will be "held" within the returned transaction.
1535 * This routine does NOT require any disk space to be reserved
1536 * for it within the transaction.
1537 *
1538 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
1539 * and it indicates the fork which is to be truncated. For the
1540 * attribute fork we only support truncation to size 0.
1541 *
1542 * We use the sync parameter to indicate whether or not the first
1543 * transaction we perform might have to be synchronous. For the attr fork,
1544 * it needs to be so if the unlink of the inode is not yet known to be
1545 * permanent in the log. This keeps us from freeing and reusing the
1546 * blocks of the attribute fork before the unlink of the inode becomes
1547 * permanent.
1548 *
1549 * For the data fork, we normally have to run synchronously if we're
1550 * being called out of the inactive path or we're being called
1551 * out of the create path where we're truncating an existing file.
1552 * Either way, the truncate needs to be sync so blocks don't reappear
1553 * in the file with altered data in case of a crash. wsync filesystems
1554 * can run the first case async because anything that shrinks the inode
1555 * has to run sync so by the time we're called here from inactive, the
1556 * inode size is permanently set to 0.
1557 *
1558 * Calls from the truncate path always need to be sync unless we're
1559 * in a wsync filesystem and the file has already been unlinked.
1560 *
1561 * The caller is responsible for correctly setting the sync parameter.
1562 * It gets too hard for us to guess here which path we're being called
1563 * out of just based on inode state.
1564 */
1565int
1566xfs_itruncate_finish(
1567 xfs_trans_t **tp,
1568 xfs_inode_t *ip,
1569 xfs_fsize_t new_size,
1570 int fork,
1571 int sync)
1572{
1573 xfs_fsblock_t first_block;
1574 xfs_fileoff_t first_unmap_block;
1575 xfs_fileoff_t last_block;
1576 xfs_filblks_t unmap_len=0;
1577 xfs_mount_t *mp;
1578 xfs_trans_t *ntp;
1579 int done;
1580 int committed;
1581 xfs_bmap_free_t free_list;
1582 int error;
1583
1584 ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1585 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
ba87ea69 1586 ASSERT((new_size == 0) || (new_size <= ip->i_size));
1da177e4
LT
1587 ASSERT(*tp != NULL);
1588 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1589 ASSERT(ip->i_transp == *tp);
1590 ASSERT(ip->i_itemp != NULL);
1591 ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
1592
1593
1594 ntp = *tp;
1595 mp = (ntp)->t_mountp;
1596 ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
1597
1598 /*
1599 * We only support truncating the entire attribute fork.
1600 */
1601 if (fork == XFS_ATTR_FORK) {
1602 new_size = 0LL;
1603 }
1604 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1605 xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
1606 /*
1607 * The first thing we do is set the size to new_size permanently
1608 * on disk. This way we don't have to worry about anyone ever
1609 * being able to look at the data being freed even in the face
1610 * of a crash. What we're getting around here is the case where
1611 * we free a block, it is allocated to another file, it is written
1612 * to, and then we crash. If the new data gets written to the
1613 * file but the log buffers containing the free and reallocation
1614 * don't, then we'd end up with garbage in the blocks being freed.
1615 * As long as we make the new_size permanent before actually
1616 * freeing any blocks it doesn't matter if they get writtten to.
1617 *
1618 * The callers must signal into us whether or not the size
1619 * setting here must be synchronous. There are a few cases
1620 * where it doesn't have to be synchronous. Those cases
1621 * occur if the file is unlinked and we know the unlink is
1622 * permanent or if the blocks being truncated are guaranteed
1623 * to be beyond the inode eof (regardless of the link count)
1624 * and the eof value is permanent. Both of these cases occur
1625 * only on wsync-mounted filesystems. In those cases, we're
1626 * guaranteed that no user will ever see the data in the blocks
1627 * that are being truncated so the truncate can run async.
1628 * In the free beyond eof case, the file may wind up with
1629 * more blocks allocated to it than it needs if we crash
1630 * and that won't get fixed until the next time the file
1631 * is re-opened and closed but that's ok as that shouldn't
1632 * be too many blocks.
1633 *
1634 * However, we can't just make all wsync xactions run async
1635 * because there's one call out of the create path that needs
 1636          * to run sync when it's truncating to size 0 an existing
 1637          * file whose size is > 0.
1638 *
1639 * It's probably possible to come up with a test in this
1640 * routine that would correctly distinguish all the above
1641 * cases from the values of the function parameters and the
1642 * inode state but for sanity's sake, I've decided to let the
1643 * layers above just tell us. It's simpler to correctly figure
1644 * out in the layer above exactly under what conditions we
 1645          * can run async and I think it's easier for others to read and
1646 * follow the logic in case something has to be changed.
1647 * cscope is your friend -- rcc.
1648 *
1649 * The attribute fork is much simpler.
1650 *
1651 * For the attribute fork we allow the caller to tell us whether
1652 * the unlink of the inode that led to this call is yet permanent
1653 * in the on disk log. If it is not and we will be freeing extents
1654 * in this inode then we make the first transaction synchronous
1655 * to make sure that the unlink is permanent by the time we free
1656 * the blocks.
1657 */
1658 if (fork == XFS_DATA_FORK) {
1659 if (ip->i_d.di_nextents > 0) {
1660 /*
1661 * If we are not changing the file size then do
1662 * not update the on-disk file size - we may be
1663 * called from xfs_inactive_free_eofblocks(). If we
1664 * update the on-disk file size and then the system
1665 * crashes before the contents of the file are
1666 * flushed to disk then the files may be full of
1667 * holes (ie NULL files bug).
1668 */
1669 if (ip->i_size != new_size) {
1670 ip->i_d.di_size = new_size;
1671 ip->i_size = new_size;
1672 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1673 }
1674 }
1675 } else if (sync) {
1676 ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
1677 if (ip->i_d.di_anextents > 0)
1678 xfs_trans_set_sync(ntp);
1679 }
1680 ASSERT(fork == XFS_DATA_FORK ||
1681 (fork == XFS_ATTR_FORK &&
1682 ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
1683 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
1684
1685 /*
1686 * Since it is possible for space to become allocated beyond
1687 * the end of the file (in a crash where the space is allocated
1688 * but the inode size is not yet updated), simply remove any
1689 * blocks which show up between the new EOF and the maximum
1690 * possible file size. If the first block to be removed is
1691 * beyond the maximum file size (ie it is the same as last_block),
1692 * then there is nothing to do.
1693 */
1694 last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1695 ASSERT(first_unmap_block <= last_block);
1696 done = 0;
1697 if (last_block == first_unmap_block) {
1698 done = 1;
1699 } else {
1700 unmap_len = last_block - first_unmap_block + 1;
1701 }
1702 while (!done) {
1703 /*
 1704                  * Free up to XFS_ITRUNC_MAX_EXTENTS extents.  xfs_bunmapi()
1705 * will tell us whether it freed the entire range or
1706 * not. If this is a synchronous mount (wsync),
1707 * then we can tell bunmapi to keep all the
1708 * transactions asynchronous since the unlink
1709 * transaction that made this inode inactive has
1710 * already hit the disk. There's no danger of
1711 * the freed blocks being reused, there being a
1712 * crash, and the reused blocks suddenly reappearing
1713 * in this file with garbage in them once recovery
1714 * runs.
1715 */
1716 XFS_BMAP_INIT(&free_list, &first_block);
541d7d3c 1717 error = xfs_bunmapi(ntp, ip,
3e57ecf6 1718 first_unmap_block, unmap_len,
1719 XFS_BMAPI_AFLAG(fork) |
1720 (sync ? 0 : XFS_BMAPI_ASYNC),
1721 XFS_ITRUNC_MAX_EXTENTS,
1722 &first_block, &free_list,
1723 NULL, &done);
1724 if (error) {
1725 /*
1726 * If the bunmapi call encounters an error,
1727 * return to the caller where the transaction
1728 * can be properly aborted. We just need to
1729 * make sure we're not holding any resources
1730 * that we were not when we came in.
1731 */
1732 xfs_bmap_cancel(&free_list);
1733 return error;
1734 }
1735
1736 /*
1737 * Duplicate the transaction that has the permanent
1738 * reservation and commit the old transaction.
1739 */
f7c99b6f 1740 error = xfs_bmap_finish(tp, &free_list, &committed);
1741 ntp = *tp;
1742 if (error) {
1743 /*
1744 * If the bmap finish call encounters an error,
1745 * return to the caller where the transaction
1746 * can be properly aborted. We just need to
1747 * make sure we're not holding any resources
1748 * that we were not when we came in.
1749 *
1750 * Aborting from this point might lose some
1751 * blocks in the file system, but oh well.
1752 */
1753 xfs_bmap_cancel(&free_list);
1754 if (committed) {
1755 /*
1756 * If the passed in transaction committed
1757 * in xfs_bmap_finish(), then we want to
1758 * add the inode to this one before returning.
1759 * This keeps things simple for the higher
1760 * level code, because it always knows that
1761 * the inode is locked and held in the
1762 * transaction that returns to it whether
1763 * errors occur or not. We don't mark the
1764 * inode dirty so that this transaction can
1765 * be easily aborted if possible.
1766 */
1767 xfs_trans_ijoin(ntp, ip,
1768 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1769 xfs_trans_ihold(ntp, ip);
1770 }
1771 return error;
1772 }
1773
1774 if (committed) {
1775 /*
1776 * The first xact was committed,
1777 * so add the inode to the new one.
1778 * Mark it dirty so it will be logged
1779 * and moved forward in the log as
1780 * part of every commit.
1781 */
1782 xfs_trans_ijoin(ntp, ip,
1783 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1784 xfs_trans_ihold(ntp, ip);
1785 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1786 }
1787 ntp = xfs_trans_dup(ntp);
1c72bf90 1788 (void) xfs_trans_commit(*tp, 0);
1789 *tp = ntp;
1790 error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
1791 XFS_TRANS_PERM_LOG_RES,
1792 XFS_ITRUNCATE_LOG_COUNT);
1793 /*
1794 * Add the inode being truncated to the next chained
1795 * transaction.
1796 */
1797 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1798 xfs_trans_ihold(ntp, ip);
1799 if (error)
1800 return (error);
1801 }
1802 /*
1803 * Only update the size in the case of the data fork, but
1804 * always re-log the inode so that our permanent transaction
1805 * can keep on rolling it forward in the log.
1806 */
1807 if (fork == XFS_DATA_FORK) {
1808 xfs_isize_check(mp, ip, new_size);
1809 /*
1810 * If we are not changing the file size then do
1811 * not update the on-disk file size - we may be
1812 * called from xfs_inactive_free_eofblocks(). If we
1813 * update the on-disk file size and then the system
1814 * crashes before the contents of the file are
1815 * flushed to disk then the files may be full of
1816 * holes (ie NULL files bug).
1817 */
1818 if (ip->i_size != new_size) {
1819 ip->i_d.di_size = new_size;
1820 ip->i_size = new_size;
1821 }
1822 }
1823 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1824 ASSERT((new_size != 0) ||
1825 (fork == XFS_ATTR_FORK) ||
1826 (ip->i_delayed_blks == 0));
1827 ASSERT((new_size != 0) ||
1828 (fork == XFS_ATTR_FORK) ||
1829 (ip->i_d.di_nextents == 0));
1830 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
1831 return 0;
1832}
1833
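/*
 * Editor's sketch (not part of the original source): the transaction
 * "roll" used in the unmap loop above, pulled out as a hypothetical
 * helper so the pattern is easier to see.  A duplicate transaction
 * carries the permanent log reservation forward, the old transaction
 * is committed, log space is re-reserved, and the still-locked inode
 * is rejoined.  All calls appear in this file; only the helper name
 * is invented.
 */
STATIC int
xfs_example_roll_itruncate_trans(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = (*tp)->t_mountp;
	xfs_trans_t	*ntp;
	int		error;

	ntp = xfs_trans_dup(*tp);
	(void) xfs_trans_commit(*tp, 0);
	*tp = ntp;
	error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	/* rejoin the inode so the caller always gets it back locked */
	xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(ntp, ip);
	return error;
}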
1834
1835/*
1836 * xfs_igrow_start
1837 *
1838 * Do the first part of growing a file: zero any data in the last
1839 * block that is beyond the old EOF. We need to do this before
1840 * the inode is joined to the transaction to modify the i_size.
1841 * That way we can drop the inode lock and call into the buffer
1842 * cache to get the buffer mapping the EOF.
1843 */
1844int
1845xfs_igrow_start(
1846 xfs_inode_t *ip,
1847 xfs_fsize_t new_size,
1848 cred_t *credp)
1849{
1850 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1851 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
ba87ea69 1852 ASSERT(new_size > ip->i_size);
1da177e4 1853
1854 /*
1855 * Zero any pages that may have been created by
1856 * xfs_write_file() beyond the end of the file
1857 * and any blocks between the old and new file sizes.
1858 */
541d7d3c 1859 return xfs_zero_eof(ip, new_size, ip->i_size);
1860}
1861
1862/*
1863 * xfs_igrow_finish
1864 *
1865 * This routine is called to extend the size of a file.
1866 * The inode must have both the iolock and the ilock locked
1867 * for update and it must be a part of the current transaction.
1868 * The xfs_igrow_start() function must have been called previously.
1869 * If the change_flag is not zero, the inode change timestamp will
1870 * be updated.
1871 */
1872void
1873xfs_igrow_finish(
1874 xfs_trans_t *tp,
1875 xfs_inode_t *ip,
1876 xfs_fsize_t new_size,
1877 int change_flag)
1878{
1879 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1880 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1881 ASSERT(ip->i_transp == tp);
ba87ea69 1882 ASSERT(new_size > ip->i_size);
1883
1884 /*
1885 * Update the file size. Update the inode change timestamp
1886 * if change_flag set.
1887 */
1888 ip->i_d.di_size = new_size;
ba87ea69 1889 ip->i_size = new_size;
1890 if (change_flag)
1891 xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
1892 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1893
1894}
1895
1896
1897/*
1898 * This is called when the inode's link count goes to 0.
1899 * We place the on-disk inode on a list in the AGI. It
1900 * will be pulled from this list when the inode is freed.
1901 */
1902int
1903xfs_iunlink(
1904 xfs_trans_t *tp,
1905 xfs_inode_t *ip)
1906{
1907 xfs_mount_t *mp;
1908 xfs_agi_t *agi;
1909 xfs_dinode_t *dip;
1910 xfs_buf_t *agibp;
1911 xfs_buf_t *ibp;
1912 xfs_agnumber_t agno;
1913 xfs_daddr_t agdaddr;
1914 xfs_agino_t agino;
1915 short bucket_index;
1916 int offset;
1917 int error;
1918 int agi_ok;
1919
1920 ASSERT(ip->i_d.di_nlink == 0);
1921 ASSERT(ip->i_d.di_mode != 0);
1922 ASSERT(ip->i_transp == tp);
1923
1924 mp = tp->t_mountp;
1925
1926 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1927 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1928
1929 /*
1930 * Get the agi buffer first. It ensures lock ordering
1931 * on the list.
1932 */
1933 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1934 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
859d7182 1935 if (error)
1da177e4 1936 return error;
859d7182 1937
1938 /*
1939 * Validate the magic number of the agi block.
1940 */
1941 agi = XFS_BUF_TO_AGI(agibp);
1942 agi_ok =
1943 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1944 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1945 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
1946 XFS_RANDOM_IUNLINK))) {
1947 XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
1948 xfs_trans_brelse(tp, agibp);
1949 return XFS_ERROR(EFSCORRUPTED);
1950 }
1951 /*
1952 * Get the index into the agi hash table for the
1953 * list this inode will go on.
1954 */
1955 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1956 ASSERT(agino != 0);
1957 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1958 ASSERT(agi->agi_unlinked[bucket_index]);
16259e7d 1959 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1da177e4 1960
16259e7d 1961 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1962 /*
1963 * There is already another inode in the bucket we need
1964 * to add ourselves to. Add us at the front of the list.
1965 * Here we put the head pointer into our next pointer,
1966 * and then we fall through to point the head at us.
1967 */
1968 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
1969 if (error)
1970 return error;
1971
347d1c01 1972 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
1973 /* both on-disk, don't endian flip twice */
1974 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1975 offset = ip->i_boffset +
1976 offsetof(xfs_dinode_t, di_next_unlinked);
1977 xfs_trans_inode_buf(tp, ibp);
1978 xfs_trans_log_buf(tp, ibp, offset,
1979 (offset + sizeof(xfs_agino_t) - 1));
1980 xfs_inobp_check(mp, ibp);
1981 }
1982
1983 /*
1984 * Point the bucket head pointer at the inode being inserted.
1985 */
1986 ASSERT(agino != 0);
16259e7d 1987 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1988 offset = offsetof(xfs_agi_t, agi_unlinked) +
1989 (sizeof(xfs_agino_t) * bucket_index);
1990 xfs_trans_log_buf(tp, agibp, offset,
1991 (offset + sizeof(xfs_agino_t) - 1));
1992 return 0;
1993}
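/*
 * Editor's sketch (not part of the original source): the AGI unlinked
 * buckets above are per-AG singly linked lists with head insertion --
 * the new inode's next pointer takes the old bucket head, then the
 * bucket head is pointed at the new inode.  A self-contained userspace
 * analogue, with invented names standing in for the on-disk fields:
 */
#include <stdint.h>

#define EXAMPLE_BUCKETS		64	/* stands in for XFS_AGI_UNLINKED_BUCKETS */
#define EXAMPLE_NULLAGINO	((uint32_t)-1)	/* stands in for NULLAGINO */

struct example_agi {
	uint32_t unlinked[EXAMPLE_BUCKETS];	/* bucket list heads */
};

/* next[agino] plays the role of each inode's di_next_unlinked field */
static void
example_iunlink(struct example_agi *agi, uint32_t *next, uint32_t agino)
{
	int bucket = agino % EXAMPLE_BUCKETS;

	next[agino] = agi->unlinked[bucket];	/* old head becomes our next */
	agi->unlinked[bucket] = agino;		/* head now points at us */
}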
1994
1995/*
1996 * Pull the on-disk inode from the AGI unlinked list.
1997 */
1998STATIC int
1999xfs_iunlink_remove(
2000 xfs_trans_t *tp,
2001 xfs_inode_t *ip)
2002{
2003 xfs_ino_t next_ino;
2004 xfs_mount_t *mp;
2005 xfs_agi_t *agi;
2006 xfs_dinode_t *dip;
2007 xfs_buf_t *agibp;
2008 xfs_buf_t *ibp;
2009 xfs_agnumber_t agno;
2010 xfs_daddr_t agdaddr;
2011 xfs_agino_t agino;
2012 xfs_agino_t next_agino;
2013 xfs_buf_t *last_ibp;
6fdf8ccc 2014 xfs_dinode_t *last_dip = NULL;
1da177e4 2015 short bucket_index;
6fdf8ccc 2016 int offset, last_offset = 0;
2017 int error;
2018 int agi_ok;
2019
2020 /*
2021 * First pull the on-disk inode from the AGI unlinked list.
2022 */
2023 mp = tp->t_mountp;
2024
2025 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2026 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
2027
2028 /*
2029 * Get the agi buffer first. It ensures lock ordering
2030 * on the list.
2031 */
2032 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
2033 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
2034 if (error) {
2035 cmn_err(CE_WARN,
2036 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.",
2037 error, mp->m_fsname);
2038 return error;
2039 }
2040 /*
2041 * Validate the magic number of the agi block.
2042 */
2043 agi = XFS_BUF_TO_AGI(agibp);
2044 agi_ok =
2045 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
2046 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
2047 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
2048 XFS_RANDOM_IUNLINK_REMOVE))) {
2049 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
2050 mp, agi);
2051 xfs_trans_brelse(tp, agibp);
2052 cmn_err(CE_WARN,
2053 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.",
2054 mp->m_fsname);
2055 return XFS_ERROR(EFSCORRUPTED);
2056 }
2057 /*
2058 * Get the index into the agi hash table for the
2059 * list this inode will go on.
2060 */
2061 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2062 ASSERT(agino != 0);
2063 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
16259e7d 2064 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
2065 ASSERT(agi->agi_unlinked[bucket_index]);
2066
16259e7d 2067 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2068 /*
2069 * We're at the head of the list. Get the inode's
2070 * on-disk buffer to see if there is anyone after us
2071 * on the list. Only modify our next pointer if it
2072 * is not already NULLAGINO. This saves us the overhead
2073 * of dealing with the buffer when there is no need to
2074 * change it.
2075 */
b12dd342 2076 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2077 if (error) {
2078 cmn_err(CE_WARN,
2079 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2080 error, mp->m_fsname);
2081 return error;
2082 }
347d1c01 2083 next_agino = be32_to_cpu(dip->di_next_unlinked);
2084 ASSERT(next_agino != 0);
2085 if (next_agino != NULLAGINO) {
347d1c01 2086 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2087 offset = ip->i_boffset +
2088 offsetof(xfs_dinode_t, di_next_unlinked);
2089 xfs_trans_inode_buf(tp, ibp);
2090 xfs_trans_log_buf(tp, ibp, offset,
2091 (offset + sizeof(xfs_agino_t) - 1));
2092 xfs_inobp_check(mp, ibp);
2093 } else {
2094 xfs_trans_brelse(tp, ibp);
2095 }
2096 /*
2097 * Point the bucket head pointer at the next inode.
2098 */
2099 ASSERT(next_agino != 0);
2100 ASSERT(next_agino != agino);
16259e7d 2101 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2102 offset = offsetof(xfs_agi_t, agi_unlinked) +
2103 (sizeof(xfs_agino_t) * bucket_index);
2104 xfs_trans_log_buf(tp, agibp, offset,
2105 (offset + sizeof(xfs_agino_t) - 1));
2106 } else {
2107 /*
2108 * We need to search the list for the inode being freed.
2109 */
16259e7d 2110 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2111 last_ibp = NULL;
2112 while (next_agino != agino) {
2113 /*
2114 * If the last inode wasn't the one pointing to
2115 * us, then release its buffer since we're not
2116 * going to do anything with it.
2117 */
2118 if (last_ibp != NULL) {
2119 xfs_trans_brelse(tp, last_ibp);
2120 }
2121 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2122 error = xfs_inotobp(mp, tp, next_ino, &last_dip,
2123 &last_ibp, &last_offset);
2124 if (error) {
2125 cmn_err(CE_WARN,
2126 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
2127 error, mp->m_fsname);
2128 return error;
2129 }
347d1c01 2130 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
2131 ASSERT(next_agino != NULLAGINO);
2132 ASSERT(next_agino != 0);
2133 }
2134 /*
2135 * Now last_ibp points to the buffer previous to us on
2136 * the unlinked list. Pull us from the list.
2137 */
b12dd342 2138 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2139 if (error) {
2140 cmn_err(CE_WARN,
2141 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2142 error, mp->m_fsname);
2143 return error;
2144 }
347d1c01 2145 next_agino = be32_to_cpu(dip->di_next_unlinked);
2146 ASSERT(next_agino != 0);
2147 ASSERT(next_agino != agino);
2148 if (next_agino != NULLAGINO) {
347d1c01 2149 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2150 offset = ip->i_boffset +
2151 offsetof(xfs_dinode_t, di_next_unlinked);
2152 xfs_trans_inode_buf(tp, ibp);
2153 xfs_trans_log_buf(tp, ibp, offset,
2154 (offset + sizeof(xfs_agino_t) - 1));
2155 xfs_inobp_check(mp, ibp);
2156 } else {
2157 xfs_trans_brelse(tp, ibp);
2158 }
2159 /*
2160 * Point the previous inode on the list to the next inode.
2161 */
347d1c01 2162 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2163 ASSERT(next_agino != 0);
2164 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2165 xfs_trans_inode_buf(tp, last_ibp);
2166 xfs_trans_log_buf(tp, last_ibp, offset,
2167 (offset + sizeof(xfs_agino_t) - 1));
2168 xfs_inobp_check(mp, last_ibp);
2169 }
2170 return 0;
2171}
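/*
 * Editor's sketch (not part of the original source), companion to the
 * example after xfs_iunlink(): removal either unhooks the list head or
 * walks to the predecessor and splices the inode out, which is exactly
 * the two branches of xfs_iunlink_remove() above, minus the buffer
 * reading and logging.
 */
static void
example_iunlink_remove(struct example_agi *agi, uint32_t *next,
		       uint32_t agino)
{
	int bucket = agino % EXAMPLE_BUCKETS;
	uint32_t cur = agi->unlinked[bucket];

	if (cur == agino) {
		/* head of the bucket: point the head at our successor */
		agi->unlinked[bucket] = next[agino];
	} else {
		/* walk to the predecessor, then bypass us */
		while (next[cur] != agino)
			cur = next[cur];
		next[cur] = next[agino];
	}
	next[agino] = EXAMPLE_NULLAGINO;
}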
2172
7989cb8e 2173STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
2174{
2175 return (((ip->i_itemp == NULL) ||
2176 !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2177 (ip->i_update_core == 0));
2178}
2179
ba0f32d4 2180STATIC void
2181xfs_ifree_cluster(
2182 xfs_inode_t *free_ip,
2183 xfs_trans_t *tp,
2184 xfs_ino_t inum)
2185{
2186 xfs_mount_t *mp = free_ip->i_mount;
2187 int blks_per_cluster;
2188 int nbufs;
2189 int ninodes;
2190 int i, j, found, pre_flushed;
2191 xfs_daddr_t blkno;
2192 xfs_buf_t *bp;
2193 xfs_inode_t *ip, **ip_found;
2194 xfs_inode_log_item_t *iip;
2195 xfs_log_item_t *lip;
da353b0d 2196 xfs_perag_t *pag = xfs_get_perag(mp, inum);
2197
2198 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2199 blks_per_cluster = 1;
2200 ninodes = mp->m_sb.sb_inopblock;
2201 nbufs = XFS_IALLOC_BLOCKS(mp);
2202 } else {
2203 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2204 mp->m_sb.sb_blocksize;
2205 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2206 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
2207 }
2208
2209 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2210
2211 for (j = 0; j < nbufs; j++, inum += ninodes) {
2212 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2213 XFS_INO_TO_AGBNO(mp, inum));
2214
2215
2216 /*
 2217                  * Look for each inode in memory and attempt to lock it;
 2218                  * we can be racing with flush and tail pushing here.
 2219                  * Any inode we get the locks on is added to an array of
 2220                  * inode items to process later.
 2221                  *
 2222                  * If we get the buffer lock, we could beat a flush
 2223                  * or tail pushing thread to the lock here, in which
 2224                  * case they will go looking for the inode buffer
 2225                  * and fail, so we need some other form of interlock
 2226                  * here.
2227 */
2228 found = 0;
2229 for (i = 0; i < ninodes; i++) {
2230 read_lock(&pag->pag_ici_lock);
2231 ip = radix_tree_lookup(&pag->pag_ici_root,
2232 XFS_INO_TO_AGINO(mp, (inum + i)));
2233
2234 /* Inode not in memory or we found it already,
2235 * nothing to do
2236 */
7a18c386 2237 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
da353b0d 2238 read_unlock(&pag->pag_ici_lock);
2239 continue;
2240 }
2241
2242 if (xfs_inode_clean(ip)) {
da353b0d 2243 read_unlock(&pag->pag_ici_lock);
2244 continue;
2245 }
2246
2247 /* If we can get the locks then add it to the
2248 * list, otherwise by the time we get the bp lock
2249 * below it will already be attached to the
2250 * inode buffer.
2251 */
2252
 2253                  /* This inode will already be locked - by us, let's
2254 * keep it that way.
2255 */
2256
2257 if (ip == free_ip) {
2258 if (xfs_iflock_nowait(ip)) {
7a18c386 2259 xfs_iflags_set(ip, XFS_ISTALE);
2260 if (xfs_inode_clean(ip)) {
2261 xfs_ifunlock(ip);
2262 } else {
2263 ip_found[found++] = ip;
2264 }
2265 }
da353b0d 2266 read_unlock(&pag->pag_ici_lock);
1da177e4
LT
2267 continue;
2268 }
2269
2270 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2271 if (xfs_iflock_nowait(ip)) {
7a18c386 2272 xfs_iflags_set(ip, XFS_ISTALE);
2273
2274 if (xfs_inode_clean(ip)) {
2275 xfs_ifunlock(ip);
2276 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2277 } else {
2278 ip_found[found++] = ip;
2279 }
2280 } else {
2281 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2282 }
2283 }
da353b0d 2284 read_unlock(&pag->pag_ici_lock);
1da177e4
LT
2285 }
2286
2287 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2288 mp->m_bsize * blks_per_cluster,
2289 XFS_BUF_LOCK);
2290
2291 pre_flushed = 0;
2292 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2293 while (lip) {
2294 if (lip->li_type == XFS_LI_INODE) {
2295 iip = (xfs_inode_log_item_t *)lip;
2296 ASSERT(iip->ili_logged == 1);
2297 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
287f3dad 2298 spin_lock(&mp->m_ail_lock);
1da177e4 2299 iip->ili_flush_lsn = iip->ili_item.li_lsn;
287f3dad 2300 spin_unlock(&mp->m_ail_lock);
e5ffd2bb 2301 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2302 pre_flushed++;
2303 }
2304 lip = lip->li_bio_list;
2305 }
2306
2307 for (i = 0; i < found; i++) {
2308 ip = ip_found[i];
2309 iip = ip->i_itemp;
2310
2311 if (!iip) {
2312 ip->i_update_core = 0;
2313 xfs_ifunlock(ip);
2314 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2315 continue;
2316 }
2317
2318 iip->ili_last_fields = iip->ili_format.ilf_fields;
2319 iip->ili_format.ilf_fields = 0;
2320 iip->ili_logged = 1;
287f3dad 2321 spin_lock(&mp->m_ail_lock);
1da177e4 2322 iip->ili_flush_lsn = iip->ili_item.li_lsn;
287f3dad 2323 spin_unlock(&mp->m_ail_lock);
2324
2325 xfs_buf_attach_iodone(bp,
2326 (void(*)(xfs_buf_t*,xfs_log_item_t*))
2327 xfs_istale_done, (xfs_log_item_t *)iip);
2328 if (ip != free_ip) {
2329 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2330 }
2331 }
2332
2333 if (found || pre_flushed)
2334 xfs_trans_stale_inode_buf(tp, bp);
2335 xfs_trans_binval(tp, bp);
2336 }
2337
2338 kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
da353b0d 2339 xfs_put_perag(mp, pag);
2340}
2341
2342/*
2343 * This is called to return an inode to the inode free list.
2344 * The inode should already be truncated to 0 length and have
2345 * no pages associated with it. This routine also assumes that
2346 * the inode is already a part of the transaction.
2347 *
2348 * The on-disk copy of the inode will have been added to the list
2349 * of unlinked inodes in the AGI. We need to remove the inode from
2350 * that list atomically with respect to freeing it here.
2351 */
2352int
2353xfs_ifree(
2354 xfs_trans_t *tp,
2355 xfs_inode_t *ip,
2356 xfs_bmap_free_t *flist)
2357{
2358 int error;
2359 int delete;
2360 xfs_ino_t first_ino;
2361 xfs_dinode_t *dip;
2362 xfs_buf_t *ibp;
2363
2364 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2365 ASSERT(ip->i_transp == tp);
2366 ASSERT(ip->i_d.di_nlink == 0);
2367 ASSERT(ip->i_d.di_nextents == 0);
2368 ASSERT(ip->i_d.di_anextents == 0);
ba87ea69 2369 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2370 ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2371 ASSERT(ip->i_d.di_nblocks == 0);
2372
2373 /*
2374 * Pull the on-disk inode from the AGI unlinked list.
2375 */
2376 error = xfs_iunlink_remove(tp, ip);
2377 if (error != 0) {
2378 return error;
2379 }
2380
2381 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2382 if (error != 0) {
2383 return error;
2384 }
2385 ip->i_d.di_mode = 0; /* mark incore inode as free */
2386 ip->i_d.di_flags = 0;
2387 ip->i_d.di_dmevmask = 0;
2388 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2389 ip->i_df.if_ext_max =
2390 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2391 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2392 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2393 /*
2394 * Bump the generation count so no one will be confused
2395 * by reincarnations of this inode.
2396 */
2397 ip->i_d.di_gen++;
c319b58b 2398
2399 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2400
2401 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0);
2402 if (error)
2403 return error;
2404
2405 /*
2406 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
2407 * from picking up this inode when it is reclaimed (its incore state
 2408          * initialized but not flushed to disk yet). The in-core di_mode is
2409 * already cleared and a corresponding transaction logged.
2410 * The hack here just synchronizes the in-core to on-disk
2411 * di_mode value in advance before the actual inode sync to disk.
2412 * This is OK because the inode is already unlinked and would never
2413 * change its di_mode again for this inode generation.
2414 * This is a temporary hack that would require a proper fix
2415 * in the future.
2416 */
2417 dip->di_core.di_mode = 0;
2418
2419 if (delete) {
2420 xfs_ifree_cluster(ip, tp, first_ino);
2421 }
2422
2423 return 0;
2424}
2425
2426/*
2427 * Reallocate the space for if_broot based on the number of records
2428 * being added or deleted as indicated in rec_diff. Move the records
2429 * and pointers in if_broot to fit the new size. When shrinking this
2430 * will eliminate holes between the records and pointers created by
2431 * the caller. When growing this will create holes to be filled in
2432 * by the caller.
2433 *
2434 * The caller must not request to add more records than would fit in
2435 * the on-disk inode root. If the if_broot is currently NULL, then
 2436  * if we are adding records one will be allocated.  The caller must also
2437 * not request that the number of records go below zero, although
2438 * it can go to zero.
2439 *
2440 * ip -- the inode whose if_broot area is changing
2441 * ext_diff -- the change in the number of records, positive or negative,
2442 * requested for the if_broot array.
2443 */
2444void
2445xfs_iroot_realloc(
2446 xfs_inode_t *ip,
2447 int rec_diff,
2448 int whichfork)
2449{
2450 int cur_max;
2451 xfs_ifork_t *ifp;
2452 xfs_bmbt_block_t *new_broot;
2453 int new_max;
2454 size_t new_size;
2455 char *np;
2456 char *op;
2457
2458 /*
2459 * Handle the degenerate case quietly.
2460 */
2461 if (rec_diff == 0) {
2462 return;
2463 }
2464
2465 ifp = XFS_IFORK_PTR(ip, whichfork);
2466 if (rec_diff > 0) {
2467 /*
2468 * If there wasn't any memory allocated before, just
2469 * allocate it now and get out.
2470 */
2471 if (ifp->if_broot_bytes == 0) {
2472 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2473 ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2474 KM_SLEEP);
2475 ifp->if_broot_bytes = (int)new_size;
2476 return;
2477 }
2478
2479 /*
2480 * If there is already an existing if_broot, then we need
2481 * to realloc() it and shift the pointers to their new
2482 * location. The records don't change location because
2483 * they are kept butted up against the btree block header.
2484 */
2485 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2486 new_max = cur_max + rec_diff;
2487 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2488 ifp->if_broot = (xfs_bmbt_block_t *)
2489 kmem_realloc(ifp->if_broot,
2490 new_size,
2491 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2492 KM_SLEEP);
2493 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2494 ifp->if_broot_bytes);
2495 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2496 (int)new_size);
2497 ifp->if_broot_bytes = (int)new_size;
2498 ASSERT(ifp->if_broot_bytes <=
2499 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2500 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2501 return;
2502 }
2503
2504 /*
2505 * rec_diff is less than 0. In this case, we are shrinking the
2506 * if_broot buffer. It must already exist. If we go to zero
2507 * records, just get rid of the root and clear the status bit.
2508 */
2509 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2510 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2511 new_max = cur_max + rec_diff;
2512 ASSERT(new_max >= 0);
2513 if (new_max > 0)
2514 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2515 else
2516 new_size = 0;
2517 if (new_size > 0) {
2518 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2519 /*
2520 * First copy over the btree block header.
2521 */
2522 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2523 } else {
2524 new_broot = NULL;
2525 ifp->if_flags &= ~XFS_IFBROOT;
2526 }
2527
2528 /*
2529 * Only copy the records and pointers if there are any.
2530 */
2531 if (new_max > 0) {
2532 /*
2533 * First copy the records.
2534 */
2535 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2536 ifp->if_broot_bytes);
2537 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2538 (int)new_size);
2539 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2540
2541 /*
2542 * Then copy the pointers.
2543 */
2544 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2545 ifp->if_broot_bytes);
2546 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2547 (int)new_size);
2548 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2549 }
2550 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2551 ifp->if_broot = new_broot;
2552 ifp->if_broot_bytes = (int)new_size;
2553 ASSERT(ifp->if_broot_bytes <=
2554 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2555 return;
2556}
2557
2558
2559/*
2560 * This is called when the amount of space needed for if_data
2561 * is increased or decreased. The change in size is indicated by
2562 * the number of bytes that need to be added or deleted in the
2563 * byte_diff parameter.
2564 *
2565 * If the amount of space needed has decreased below the size of the
2566 * inline buffer, then switch to using the inline buffer. Otherwise,
2567 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2568 * to what is needed.
2569 *
2570 * ip -- the inode whose if_data area is changing
2571 * byte_diff -- the change in the number of bytes, positive or negative,
2572 * requested for the if_data array.
2573 */
2574void
2575xfs_idata_realloc(
2576 xfs_inode_t *ip,
2577 int byte_diff,
2578 int whichfork)
2579{
2580 xfs_ifork_t *ifp;
2581 int new_size;
2582 int real_size;
2583
2584 if (byte_diff == 0) {
2585 return;
2586 }
2587
2588 ifp = XFS_IFORK_PTR(ip, whichfork);
2589 new_size = (int)ifp->if_bytes + byte_diff;
2590 ASSERT(new_size >= 0);
2591
2592 if (new_size == 0) {
2593 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2594 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2595 }
2596 ifp->if_u1.if_data = NULL;
2597 real_size = 0;
2598 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2599 /*
2600 * If the valid extents/data can fit in if_inline_ext/data,
2601 * copy them from the malloc'd vector and free it.
2602 */
2603 if (ifp->if_u1.if_data == NULL) {
2604 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2605 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2606 ASSERT(ifp->if_real_bytes != 0);
2607 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2608 new_size);
2609 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2610 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2611 }
2612 real_size = 0;
2613 } else {
2614 /*
2615 * Stuck with malloc/realloc.
2616 * For inline data, the underlying buffer must be
2617 * a multiple of 4 bytes in size so that it can be
2618 * logged and stay on word boundaries. We enforce
2619 * that here.
2620 */
2621 real_size = roundup(new_size, 4);
2622 if (ifp->if_u1.if_data == NULL) {
2623 ASSERT(ifp->if_real_bytes == 0);
2624 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2625 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2626 /*
2627 * Only do the realloc if the underlying size
2628 * is really changing.
2629 */
2630 if (ifp->if_real_bytes != real_size) {
2631 ifp->if_u1.if_data =
2632 kmem_realloc(ifp->if_u1.if_data,
2633 real_size,
2634 ifp->if_real_bytes,
2635 KM_SLEEP);
2636 }
2637 } else {
2638 ASSERT(ifp->if_real_bytes == 0);
2639 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2640 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2641 ifp->if_bytes);
2642 }
2643 }
2644 ifp->if_real_bytes = real_size;
2645 ifp->if_bytes = new_size;
2646 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2647}
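/*
 * Editor's sketch (not part of the original source): the sizing rule
 * used above for heap-allocated inline data.  Rounding up to a 4-byte
 * multiple keeps logged regions on word boundaries; the helper name
 * is invented.
 */
#include <stddef.h>

static size_t
example_ifork_alloc_size(size_t new_size)
{
	/* equivalent to real_size = roundup(new_size, 4) above */
	return (new_size + 3) & ~(size_t)3;
}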
2648
2649
2650
2651
2652/*
2653 * Map inode to disk block and offset.
2654 *
2655 * mp -- the mount point structure for the current file system
2656 * tp -- the current transaction
2657 * ino -- the inode number of the inode to be located
2658 * imap -- this structure is filled in with the information necessary
2659 * to retrieve the given inode from disk
2660 * flags -- flags to pass to xfs_dilocate indicating whether or not
2661 * lookups in the inode btree were OK or not
2662 */
2663int
2664xfs_imap(
2665 xfs_mount_t *mp,
2666 xfs_trans_t *tp,
2667 xfs_ino_t ino,
2668 xfs_imap_t *imap,
2669 uint flags)
2670{
2671 xfs_fsblock_t fsbno;
2672 int len;
2673 int off;
2674 int error;
2675
2676 fsbno = imap->im_blkno ?
2677 XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2678 error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2679 if (error != 0) {
2680 return error;
2681 }
2682 imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2683 imap->im_len = XFS_FSB_TO_BB(mp, len);
2684 imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2685 imap->im_ioffset = (ushort)off;
2686 imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2687 return 0;
2688}
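/*
 * Editor's note (not part of the original source): im_boffset is the
 * inode's byte offset within its buffer, computed by shifting the
 * inode index by sb_inodelog (log2 of the inode size).  A worked
 * example with invented numbers: 256-byte inodes give sb_inodelog = 8,
 * so the inode at index 3 starts at byte 3 << 8 = 768.
 */
#include <stdint.h>

static uint16_t
example_inode_boffset(int off, int inodelog)
{
	return (uint16_t)(off << inodelog);	/* off * inode size, in bytes */
}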
2689
2690void
2691xfs_idestroy_fork(
2692 xfs_inode_t *ip,
2693 int whichfork)
2694{
2695 xfs_ifork_t *ifp;
2696
2697 ifp = XFS_IFORK_PTR(ip, whichfork);
2698 if (ifp->if_broot != NULL) {
2699 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2700 ifp->if_broot = NULL;
2701 }
2702
2703 /*
2704 * If the format is local, then we can't have an extents
2705 * array so just look for an inline data array. If we're
2706 * not local then we may or may not have an extents list,
2707 * so check and free it up if we do.
2708 */
2709 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2710 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2711 (ifp->if_u1.if_data != NULL)) {
2712 ASSERT(ifp->if_real_bytes != 0);
2713 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2714 ifp->if_u1.if_data = NULL;
2715 ifp->if_real_bytes = 0;
2716 }
2717 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2718 ((ifp->if_flags & XFS_IFEXTIREC) ||
2719 ((ifp->if_u1.if_extents != NULL) &&
2720 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
1da177e4 2721 ASSERT(ifp->if_real_bytes != 0);
4eea22f0 2722 xfs_iext_destroy(ifp);
2723 }
2724 ASSERT(ifp->if_u1.if_extents == NULL ||
2725 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2726 ASSERT(ifp->if_real_bytes == 0);
2727 if (whichfork == XFS_ATTR_FORK) {
2728 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2729 ip->i_afp = NULL;
2730 }
2731}
2732
2733/*
 2734  * This is called to free all the memory associated with an inode.
2735 * It must free the inode itself and any buffers allocated for
2736 * if_extents/if_data and if_broot. It must also free the lock
2737 * associated with the inode.
2738 */
2739void
2740xfs_idestroy(
2741 xfs_inode_t *ip)
2742{
2743 switch (ip->i_d.di_mode & S_IFMT) {
2744 case S_IFREG:
2745 case S_IFDIR:
2746 case S_IFLNK:
2747 xfs_idestroy_fork(ip, XFS_DATA_FORK);
2748 break;
2749 }
2750 if (ip->i_afp)
2751 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2752 mrfree(&ip->i_lock);
2753 mrfree(&ip->i_iolock);
2754 freesema(&ip->i_flock);
1543d79c 2755
cf441eeb 2756#ifdef XFS_INODE_TRACE
2757 ktrace_free(ip->i_trace);
2758#endif
2759#ifdef XFS_BMAP_TRACE
2760 ktrace_free(ip->i_xtrace);
2761#endif
2762#ifdef XFS_BMBT_TRACE
2763 ktrace_free(ip->i_btrace);
2764#endif
2765#ifdef XFS_RW_TRACE
2766 ktrace_free(ip->i_rwtrace);
2767#endif
2768#ifdef XFS_ILOCK_TRACE
2769 ktrace_free(ip->i_lock_trace);
2770#endif
2771#ifdef XFS_DIR2_TRACE
2772 ktrace_free(ip->i_dir_trace);
2773#endif
2774 if (ip->i_itemp) {
2775 /*
2776 * Only if we are shutting down the fs will we see an
2777 * inode still in the AIL. If it is there, we should remove
2778 * it to prevent a use-after-free from occurring.
2779 */
2780 xfs_mount_t *mp = ip->i_mount;
2781 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
2782
2783 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2784 XFS_FORCED_SHUTDOWN(ip->i_mount));
2785 if (lip->li_flags & XFS_LI_IN_AIL) {
287f3dad 2786 spin_lock(&mp->m_ail_lock);
f74eaf59 2787 if (lip->li_flags & XFS_LI_IN_AIL)
287f3dad 2788 xfs_trans_delete_ail(mp, lip);
f74eaf59 2789 else
287f3dad 2790 spin_unlock(&mp->m_ail_lock);
f74eaf59 2791 }
2792 xfs_inode_item_destroy(ip);
2793 }
2794 kmem_zone_free(xfs_inode_zone, ip);
2795}
2796
2797
2798/*
2799 * Increment the pin count of the given buffer.
2800 * This value is protected by ipinlock spinlock in the mount structure.
2801 */
2802void
2803xfs_ipin(
2804 xfs_inode_t *ip)
2805{
2806 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2807
2808 atomic_inc(&ip->i_pincount);
2809}
2810
2811/*
2812 * Decrement the pin count of the given inode, and wake up
2813 * anyone in xfs_iwait_unpin() if the count goes to 0. The
c41564b5 2814 * inode must have been previously pinned with a call to xfs_ipin().
2815 */
2816void
2817xfs_iunpin(
2818 xfs_inode_t *ip)
2819{
2820 ASSERT(atomic_read(&ip->i_pincount) > 0);
2821
5d51eff4 2822 if (atomic_dec_and_test(&ip->i_pincount))
1da177e4 2823 wake_up(&ip->i_ipin_wait);
2824}
2825
2826/*
2827 * This is called to wait for the given inode to be unpinned.
2828 * It will sleep until this happens. The caller must have the
2829 * inode locked in at least shared mode so that the buffer cannot
2830 * be subsequently pinned once someone is waiting for it to be
2831 * unpinned.
2832 */
ba0f32d4 2833STATIC void
1da177e4
LT
2834xfs_iunpin_wait(
2835 xfs_inode_t *ip)
2836{
2837 xfs_inode_log_item_t *iip;
2838 xfs_lsn_t lsn;
2839
2840 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2841
2842 if (atomic_read(&ip->i_pincount) == 0) {
2843 return;
2844 }
2845
2846 iip = ip->i_itemp;
2847 if (iip && iip->ili_last_lsn) {
2848 lsn = iip->ili_last_lsn;
2849 } else {
2850 lsn = (xfs_lsn_t)0;
2851 }
2852
2853 /*
2854 * Give the log a push so we don't wait here too long.
2855 */
2856 xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
2857
2858 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2859}
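/*
 * Editor's sketch (not part of the original source): the pin protocol
 * above is an atomic counter plus a wake-up on the final unpin.  A
 * self-contained userspace analogue using C11 atomics; the spin loop
 * stands in for sleeping on ip->i_ipin_wait.
 */
#include <stdatomic.h>

static atomic_int example_pincount;

static void example_pin(void)
{
	atomic_fetch_add(&example_pincount, 1);
}

static void example_unpin(void)
{
	/* the real code wakes i_ipin_wait when the count hits zero */
	atomic_fetch_sub(&example_pincount, 1);
}

static void example_unpin_wait(void)
{
	while (atomic_load(&example_pincount) != 0)
		;	/* xfs_iunpin_wait() sleeps here instead of spinning */
}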
2860
2861
2862/*
2863 * xfs_iextents_copy()
2864 *
2865 * This is called to copy the REAL extents (as opposed to the delayed
2866 * allocation extents) from the inode into the given buffer. It
2867 * returns the number of bytes copied into the buffer.
2868 *
2869 * If there are no delayed allocation extents, then we can just
2870 * memcpy() the extents into the buffer. Otherwise, we need to
2871 * examine each extent in turn and skip those which are delayed.
2872 */
2873int
2874xfs_iextents_copy(
2875 xfs_inode_t *ip,
a6f64d4a 2876 xfs_bmbt_rec_t *dp,
2877 int whichfork)
2878{
2879 int copied;
2880 int i;
2881 xfs_ifork_t *ifp;
2882 int nrecs;
2883 xfs_fsblock_t start_block;
2884
2885 ifp = XFS_IFORK_PTR(ip, whichfork);
2886 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2887 ASSERT(ifp->if_bytes > 0);
2888
2889 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3a59c94c 2890 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2891 ASSERT(nrecs > 0);
2892
2893 /*
2894 * There are some delayed allocation extents in the
2895 * inode, so copy the extents one at a time and skip
2896 * the delayed ones. There must be at least one
2897 * non-delayed extent.
2898 */
2899 copied = 0;
2900 for (i = 0; i < nrecs; i++) {
a6f64d4a 2901 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2902 start_block = xfs_bmbt_get_startblock(ep);
2903 if (ISNULLSTARTBLOCK(start_block)) {
2904 /*
2905 * It's a delayed allocation extent, so skip it.
2906 */
2907 continue;
2908 }
2909
2910 /* Translate to on disk format */
2911 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2912 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
a6f64d4a 2913 dp++;
2914 copied++;
2915 }
2916 ASSERT(copied != 0);
a6f64d4a 2917 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2918
2919 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2920}
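/*
 * Editor's sketch (not part of the original source): the loop above is
 * a filter-copy -- real extents are emitted, delayed-allocation
 * extents (marked by a null start block) are skipped.  Generic shape,
 * with invented names; the real ISNULLSTARTBLOCK() is a bit-pattern
 * test rather than a sentinel comparison.
 */
#include <stdint.h>

static int
example_copy_real_extents(const uint64_t *startblock, int nrecs,
			  uint64_t nullstartblock, uint64_t *out)
{
	int i, copied = 0;

	for (i = 0; i < nrecs; i++) {
		if (startblock[i] == nullstartblock)
			continue;	/* delayed allocation: skip */
		out[copied++] = startblock[i];
	}
	return copied;
}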
2921
2922/*
2923 * Each of the following cases stores data into the same region
2924 * of the on-disk inode, so only one of them can be valid at
2925 * any given time. While it is possible to have conflicting formats
2926 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2927 * in EXTENTS format, this can only happen when the fork has
2928 * changed formats after being modified but before being flushed.
2929 * In these cases, the format always takes precedence, because the
2930 * format indicates the current state of the fork.
2931 */
2932/*ARGSUSED*/
2933STATIC int
2934xfs_iflush_fork(
2935 xfs_inode_t *ip,
2936 xfs_dinode_t *dip,
2937 xfs_inode_log_item_t *iip,
2938 int whichfork,
2939 xfs_buf_t *bp)
2940{
2941 char *cp;
2942 xfs_ifork_t *ifp;
2943 xfs_mount_t *mp;
2944#ifdef XFS_TRANS_DEBUG
2945 int first;
2946#endif
2947 static const short brootflag[2] =
2948 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2949 static const short dataflag[2] =
2950 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2951 static const short extflag[2] =
2952 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2953
2954 if (iip == NULL)
2955 return 0;
2956 ifp = XFS_IFORK_PTR(ip, whichfork);
2957 /*
2958 * This can happen if we gave up in iformat in an error path,
2959 * for the attribute fork.
2960 */
2961 if (ifp == NULL) {
2962 ASSERT(whichfork == XFS_ATTR_FORK);
2963 return 0;
2964 }
2965 cp = XFS_DFORK_PTR(dip, whichfork);
2966 mp = ip->i_mount;
2967 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2968 case XFS_DINODE_FMT_LOCAL:
2969 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2970 (ifp->if_bytes > 0)) {
2971 ASSERT(ifp->if_u1.if_data != NULL);
2972 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2973 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2974 }
2975 break;
2976
2977 case XFS_DINODE_FMT_EXTENTS:
2978 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2979 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2980 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2981 (ifp->if_bytes == 0));
2982 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2983 (ifp->if_bytes > 0));
2984 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2985 (ifp->if_bytes > 0)) {
2986 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2987 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2988 whichfork);
2989 }
2990 break;
2991
2992 case XFS_DINODE_FMT_BTREE:
2993 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2994 (ifp->if_broot_bytes > 0)) {
2995 ASSERT(ifp->if_broot != NULL);
2996 ASSERT(ifp->if_broot_bytes <=
2997 (XFS_IFORK_SIZE(ip, whichfork) +
2998 XFS_BROOT_SIZE_ADJ));
2999 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
3000 (xfs_bmdr_block_t *)cp,
3001 XFS_DFORK_SIZE(dip, mp, whichfork));
3002 }
3003 break;
3004
3005 case XFS_DINODE_FMT_DEV:
3006 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
3007 ASSERT(whichfork == XFS_DATA_FORK);
347d1c01 3008 dip->di_u.di_dev = cpu_to_be32(ip->i_df.if_u2.if_rdev);
3009 }
3010 break;
3011
3012 case XFS_DINODE_FMT_UUID:
3013 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
3014 ASSERT(whichfork == XFS_DATA_FORK);
3015 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
3016 sizeof(uuid_t));
3017 }
3018 break;
3019
3020 default:
3021 ASSERT(0);
3022 break;
3023 }
3024
3025 return 0;
3026}
3027
3028/*
3029 * xfs_iflush() will write a modified inode's changes out to the
3030 * inode's on disk home. The caller must have the inode lock held
3031 * in at least shared mode and the inode flush semaphore must be
3032 * held as well. The inode lock will still be held upon return from
3033 * the call and the caller is free to unlock it.
3034 * The inode flush lock will be unlocked when the inode reaches the disk.
3035 * The flags indicate how the inode's buffer should be written out.
3036 */
3037int
3038xfs_iflush(
3039 xfs_inode_t *ip,
3040 uint flags)
3041{
3042 xfs_inode_log_item_t *iip;
3043 xfs_buf_t *bp;
3044 xfs_dinode_t *dip;
3045 xfs_mount_t *mp;
3046 int error;
3047 /* REFERENCED */
3048 xfs_inode_t *iq;
3049 int clcount; /* count of inodes clustered */
3050 int bufwasdelwri;
da353b0d 3051 struct hlist_node *entry;
1da177e4 3052 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3053
3054 XFS_STATS_INC(xs_iflush_count);
3055
3056 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
0d8fee32 3057 ASSERT(issemalocked(&(ip->i_flock)));
3058 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3059 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3060
3061 iip = ip->i_itemp;
3062 mp = ip->i_mount;
3063
3064 /*
3065 * If the inode isn't dirty, then just release the inode
3066 * flush lock and do nothing.
3067 */
3068 if ((ip->i_update_core == 0) &&
3069 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3070 ASSERT((iip != NULL) ?
3071 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3072 xfs_ifunlock(ip);
3073 return 0;
3074 }
3075
3076 /*
3077 * We can't flush the inode until it is unpinned, so
 3078          * wait for it.  We know no one new can pin it, because
3079 * we are holding the inode lock shared and you need
3080 * to hold it exclusively to pin the inode.
3081 */
3082 xfs_iunpin_wait(ip);
3083
3084 /*
3085 * This may have been unpinned because the filesystem is shutting
3086 * down forcibly. If that's the case we must not write this inode
3087 * to disk, because the log record didn't make it to disk!
3088 */
3089 if (XFS_FORCED_SHUTDOWN(mp)) {
3090 ip->i_update_core = 0;
3091 if (iip)
3092 iip->ili_format.ilf_fields = 0;
3093 xfs_ifunlock(ip);
3094 return XFS_ERROR(EIO);
3095 }
3096
3097 /*
3098 * Get the buffer containing the on-disk inode.
3099 */
3100 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3101 if (error) {
3102 xfs_ifunlock(ip);
3103 return error;
3104 }
3105
3106 /*
3107 * Decide how buffer will be flushed out. This is done before
3108 * the call to xfs_iflush_int because this field is zeroed by it.
3109 */
3110 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3111 /*
3112 * Flush out the inode buffer according to the directions
3113 * of the caller. In the cases where the caller has given
3114 * us a choice choose the non-delwri case. This is because
3115 * the inode is in the AIL and we need to get it out soon.
3116 */
3117 switch (flags) {
3118 case XFS_IFLUSH_SYNC:
3119 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3120 flags = 0;
3121 break;
3122 case XFS_IFLUSH_ASYNC:
3123 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3124 flags = INT_ASYNC;
3125 break;
3126 case XFS_IFLUSH_DELWRI:
3127 flags = INT_DELWRI;
3128 break;
3129 default:
3130 ASSERT(0);
3131 flags = 0;
3132 break;
3133 }
3134 } else {
3135 switch (flags) {
3136 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3137 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3138 case XFS_IFLUSH_DELWRI:
3139 flags = INT_DELWRI;
3140 break;
3141 case XFS_IFLUSH_ASYNC:
3142 flags = INT_ASYNC;
3143 break;
3144 case XFS_IFLUSH_SYNC:
3145 flags = 0;
3146 break;
3147 default:
3148 ASSERT(0);
3149 flags = 0;
3150 break;
3151 }
3152 }
3153
3154 /*
3155 * First flush out the inode that xfs_iflush was called with.
3156 */
3157 error = xfs_iflush_int(ip, bp);
3158 if (error) {
3159 goto corrupt_out;
3160 }
3161
3162 /*
3163 * inode clustering:
3164 * see if other inodes can be gathered into this write
3165 */
3166 spin_lock(&ip->i_cluster->icl_lock);
3167 ip->i_cluster->icl_buf = bp;
3168
3169 clcount = 0;
3170 hlist_for_each_entry(iq, entry, &ip->i_cluster->icl_inodes, i_cnode) {
3171 if (iq == ip)
3172 continue;
3173
3174 /*
3175 * Do an un-protected check to see if the inode is dirty and
3176 * is a candidate for flushing. These checks will be repeated
3177 * later after the appropriate locks are acquired.
3178 */
3179 iip = iq->i_itemp;
3180 if ((iq->i_update_core == 0) &&
3181 ((iip == NULL) ||
3182 !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3183 xfs_ipincount(iq) == 0) {
3184 continue;
3185 }
3186
3187 /*
3188 * Try to get locks. If any are unavailable,
3189 * then this inode cannot be flushed and is skipped.
3190 */
3191
3192 /* get inode locks (just i_lock) */
3193 if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3194 /* get inode flush lock */
3195 if (xfs_iflock_nowait(iq)) {
3196 /* check if pinned */
3197 if (xfs_ipincount(iq) == 0) {
 3198                                 /* Arriving here means that
 3199                                  * this inode can be flushed.
 3200                                  * First re-check that it's
 3201                                  * dirty.
 3202                                  */
3203 iip = iq->i_itemp;
3204 if ((iq->i_update_core != 0)||
3205 ((iip != NULL) &&
3206 (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3207 clcount++;
3208 error = xfs_iflush_int(iq, bp);
3209 if (error) {
3210 xfs_iunlock(iq,
3211 XFS_ILOCK_SHARED);
3212 goto cluster_corrupt_out;
3213 }
3214 } else {
3215 xfs_ifunlock(iq);
3216 }
3217 } else {
3218 xfs_ifunlock(iq);
3219 }
3220 }
3221 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3222 }
3223 }
da353b0d 3224 spin_unlock(&ip->i_cluster->icl_lock);
3225
3226 if (clcount) {
3227 XFS_STATS_INC(xs_icluster_flushcnt);
3228 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3229 }
3230
3231 /*
3232 * If the buffer is pinned then push on the log so we won't
3233 * get stuck waiting in the write for too long.
3234 */
3235 if (XFS_BUF_ISPINNED(bp)){
3236 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3237 }
3238
3239 if (flags & INT_DELWRI) {
3240 xfs_bdwrite(mp, bp);
3241 } else if (flags & INT_ASYNC) {
3242 xfs_bawrite(mp, bp);
3243 } else {
3244 error = xfs_bwrite(mp, bp);
3245 }
3246 return error;
3247
3248corrupt_out:
3249 xfs_buf_relse(bp);
7d04a335 3250 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3251 xfs_iflush_abort(ip);
3252 /*
3253 * Unlocks the flush lock
3254 */
3255 return XFS_ERROR(EFSCORRUPTED);
3256
3257cluster_corrupt_out:
3258 /* Corruption detected in the clustering loop. Invalidate the
3259 * inode buffer and shut down the filesystem.
3260 */
da353b0d 3261 spin_unlock(&ip->i_cluster->icl_lock);
3262
3263 /*
3264 * Clean up the buffer. If it was B_DELWRI, just release it --
3265 * brelse can handle it with no problems. If not, shut down the
3266 * filesystem before releasing the buffer.
3267 */
 3268         if ((bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp))) {
3269 xfs_buf_relse(bp);
3270 }
3271
7d04a335 3272 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3273
 3274         if (!bufwasdelwri) {
3275 /*
3276 * Just like incore_relse: if we have b_iodone functions,
3277 * mark the buffer as an error and call them. Otherwise
3278 * mark it as stale and brelse.
3279 */
3280 if (XFS_BUF_IODONE_FUNC(bp)) {
3281 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3282 XFS_BUF_UNDONE(bp);
3283 XFS_BUF_STALE(bp);
3284 XFS_BUF_SHUT(bp);
3285 XFS_BUF_ERROR(bp,EIO);
3286 xfs_biodone(bp);
3287 } else {
3288 XFS_BUF_STALE(bp);
3289 xfs_buf_relse(bp);
3290 }
3291 }
3292
3293 xfs_iflush_abort(iq);
3294 /*
3295 * Unlocks the flush lock
3296 */
3297 return XFS_ERROR(EFSCORRUPTED);
3298}
3299
3300
3301STATIC int
3302xfs_iflush_int(
3303 xfs_inode_t *ip,
3304 xfs_buf_t *bp)
3305{
3306 xfs_inode_log_item_t *iip;
3307 xfs_dinode_t *dip;
3308 xfs_mount_t *mp;
3309#ifdef XFS_TRANS_DEBUG
3310 int first;
3311#endif
3312
3313 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
0d8fee32 3314 ASSERT(issemalocked(&(ip->i_flock)));
3315 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3316 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3317
3318 iip = ip->i_itemp;
3319 mp = ip->i_mount;
3320
3321
3322 /*
3323 * If the inode isn't dirty, then just release the inode
3324 * flush lock and do nothing.
3325 */
3326 if ((ip->i_update_core == 0) &&
3327 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3328 xfs_ifunlock(ip);
3329 return 0;
3330 }
3331
3332 /* set *dip = inode's place in the buffer */
3333 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3334
3335 /*
3336 * Clear i_update_core before copying out the data.
3337 * This is for coordination with our timestamp updates
3338 * that don't hold the inode lock. They will always
3339 * update the timestamps BEFORE setting i_update_core,
3340 * so if we clear i_update_core after they set it we
3341 * are guaranteed to see their updates to the timestamps.
3342 * I believe that this depends on strongly ordered memory
3343 * semantics, but we have that. We use the SYNCHRONIZE
3344 * macro to make sure that the compiler does not reorder
3345 * the i_update_core access below the data copy below.
3346 */
3347 ip->i_update_core = 0;
3348 SYNCHRONIZE();
3349
3350 /*
3351 * Make sure to get the latest atime from the Linux inode.
3352 */
3353 xfs_synchronize_atime(ip);
3354
347d1c01 3355 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC,
3356 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3357 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3358 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
347d1c01 3359 ip->i_ino, be16_to_cpu(dip->di_core.di_magic), dip);
3360 goto corrupt_out;
3361 }
3362 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3363 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3364 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3365 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3366 ip->i_ino, ip, ip->i_d.di_magic);
3367 goto corrupt_out;
3368 }
3369 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3370 if (XFS_TEST_ERROR(
3371 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3372 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3373 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3374 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3375 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3376 ip->i_ino, ip);
3377 goto corrupt_out;
3378 }
3379 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3380 if (XFS_TEST_ERROR(
3381 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3382 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3383 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3384 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3385 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3386 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3387 ip->i_ino, ip);
3388 goto corrupt_out;
3389 }
3390 }
3391 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3392 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3393 XFS_RANDOM_IFLUSH_5)) {
3394 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3395 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3396 ip->i_ino,
3397 ip->i_d.di_nextents + ip->i_d.di_anextents,
3398 ip->i_d.di_nblocks,
3399 ip);
3400 goto corrupt_out;
3401 }
3402 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3403 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3404 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3405 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3406 ip->i_ino, ip->i_d.di_forkoff, ip);
3407 goto corrupt_out;
3408 }
3409 /*
3410 * bump the flush iteration count, used to detect flushes which
3411 * postdate a log record during recovery.
3412 */
3413
3414 ip->i_d.di_flushiter++;
3415
3416 /*
3417 * Copy the dirty parts of the inode into the on-disk
3418 * inode. We always copy out the core of the inode,
3419 * because if the inode is dirty at all the core must
3420 * be.
3421 */
 3422	xfs_dinode_to_disk(&dip->di_core, &ip->i_d);
3423
3424 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3425 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3426 ip->i_d.di_flushiter = 0;
3427
3428 /*
3429 * If this is really an old format inode and the superblock version
3430 * has not been updated to support only new format inodes, then
3431 * convert back to the old inode format. If the superblock version
3432 * has been updated, then make the conversion permanent.
3433 */
3434 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3435 XFS_SB_VERSION_HASNLINK(&mp->m_sb));
3436 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3437 if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
3438 /*
3439 * Convert it back.
3440 */
3441 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
 3442			dip->di_core.di_onlink = cpu_to_be16(ip->i_d.di_nlink);
3443 } else {
3444 /*
3445 * The superblock version has already been bumped,
3446 * so just make the conversion to the new inode
3447 * format permanent.
3448 */
3449 ip->i_d.di_version = XFS_DINODE_VERSION_2;
 3450			dip->di_core.di_version = XFS_DINODE_VERSION_2;
3451 ip->i_d.di_onlink = 0;
3452 dip->di_core.di_onlink = 0;
3453 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3454 memset(&(dip->di_core.di_pad[0]), 0,
3455 sizeof(dip->di_core.di_pad));
3456 ASSERT(ip->i_d.di_projid == 0);
3457 }
3458 }
3459
3460 if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
3461 goto corrupt_out;
3462 }
3463
3464 if (XFS_IFORK_Q(ip)) {
3465 /*
3466 * The only error from xfs_iflush_fork is on the data fork.
3467 */
3468 (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3469 }
3470 xfs_inobp_check(mp, bp);
3471
3472 /*
3473 * We've recorded everything logged in the inode, so we'd
3474 * like to clear the ilf_fields bits so we don't log and
3475 * flush things unnecessarily. However, we can't stop
3476 * logging all this information until the data we've copied
3477 * into the disk buffer is written to disk. If we did we might
3478 * overwrite the copy of the inode in the log with all the
3479 * data after re-logging only part of it, and in the face of
3480 * a crash we wouldn't have all the data we need to recover.
3481 *
3482 * What we do is move the bits to the ili_last_fields field.
3483 * When logging the inode, these bits are moved back to the
3484 * ilf_fields field. In the xfs_iflush_done() routine we
3485 * clear ili_last_fields, since we know that the information
3486 * those bits represent is permanently on disk. As long as
3487 * the flush completes before the inode is logged again, then
3488 * both ilf_fields and ili_last_fields will be cleared.
3489 *
3490 * We can play with the ilf_fields bits here, because the inode
3491 * lock must be held exclusively in order to set bits there
3492 * and the flush lock protects the ili_last_fields bits.
3493 * Set ili_logged so the flush done
3494 * routine can tell whether or not to look in the AIL.
3495 * Also, store the current LSN of the inode so that we can tell
3496 * whether the item has moved in the AIL from xfs_iflush_done().
3497 * In order to read the lsn we need the AIL lock, because
3498 * it is a 64 bit value that cannot be read atomically.
3499 */
3500 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3501 iip->ili_last_fields = iip->ili_format.ilf_fields;
3502 iip->ili_format.ilf_fields = 0;
3503 iip->ili_logged = 1;
3504
3505 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
 3506		spin_lock(&mp->m_ail_lock);
 3507		iip->ili_flush_lsn = iip->ili_item.li_lsn;
 3508		spin_unlock(&mp->m_ail_lock);
3509
3510 /*
3511 * Attach the function xfs_iflush_done to the inode's
3512 * buffer. This will remove the inode from the AIL
3513 * and unlock the inode's flush lock when the inode is
3514 * completely written to disk.
3515 */
3516 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3517 xfs_iflush_done, (xfs_log_item_t *)iip);
3518
3519 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3520 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3521 } else {
3522 /*
3523 * We're flushing an inode which is not in the AIL and has
3524 * not been logged but has i_update_core set. For this
3525 * case we can use a B_DELWRI flush and immediately drop
3526 * the inode flush lock because we can avoid the whole
3527 * AIL state thing. It's OK to drop the flush lock now,
3528 * because we've already locked the buffer and to do anything
3529 * you really need both.
3530 */
3531 if (iip != NULL) {
3532 ASSERT(iip->ili_logged == 0);
3533 ASSERT(iip->ili_last_fields == 0);
3534 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3535 }
3536 xfs_ifunlock(ip);
3537 }
3538
3539 return 0;
3540
3541corrupt_out:
3542 return XFS_ERROR(EFSCORRUPTED);
3543}
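/*
 * A minimal sketch (hypothetical helper, not the real xfs_iflush_done())
 * of the ili_last_fields hand-off described above: once the buffer I/O
 * completes, the saved bits describe information that is durably on disk
 * and can be dropped. The real routine also compares ili_flush_lsn with
 * the item's current LSN to decide whether to remove it from the AIL.
 */
#if 0
static void
sketch_iflush_done(
	xfs_inode_log_item_t	*iip)
{
	/* the copy made under the flush lock is now on disk */
	iip->ili_last_fields = 0;
	iip->ili_logged = 0;
	/* if li_lsn still equals ili_flush_lsn, delete the item from the AIL */
}
#endif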
3544
3545
3546/*
 3547 * Flush all inactive inodes in mp.
 3548 */
 3549void
 3550xfs_iflush_all(
 3551	xfs_mount_t	*mp)
 3552{
 3553	xfs_inode_t	*ip;
 3554	bhv_vnode_t	*vp;
 3555
 3556 again:
 3557	XFS_MOUNT_ILOCK(mp);
 3558	ip = mp->m_inodes;
 3559	if (ip == NULL)
 3560		goto out;
 3561
 3562	do {
 3563		/* Make sure we skip markers inserted by sync */
 3564		if (ip->i_mount == NULL) {
 3565			ip = ip->i_mnext;
 3566			continue;
 3567		}
 3568
 3569		vp = XFS_ITOV_NULL(ip);
 3570		if (!vp) {
 3571			XFS_MOUNT_IUNLOCK(mp);
 3572			xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
 3573			goto again;
 3574		}
 3575
 3576		ASSERT(vn_count(vp) == 0);
 3577
 3578		ip = ip->i_mnext;
 3579	} while (ip != mp->m_inodes);
 3580 out:
 3581	XFS_MOUNT_IUNLOCK(mp);
 3582}
 3583
3584/*
3585 * xfs_iaccess: check accessibility of inode for mode.
3586 */
3587int
3588xfs_iaccess(
3589 xfs_inode_t *ip,
3590 mode_t mode,
3591 cred_t *cr)
3592{
3593 int error;
3594 mode_t orgmode = mode;
 3595	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
3596
3597 if (mode & S_IWUSR) {
3598 umode_t imode = inode->i_mode;
3599
3600 if (IS_RDONLY(inode) &&
3601 (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode)))
3602 return XFS_ERROR(EROFS);
3603
3604 if (IS_IMMUTABLE(inode))
3605 return XFS_ERROR(EACCES);
3606 }
3607
3608 /*
3609 * If there's an Access Control List it's used instead of
3610 * the mode bits.
3611 */
3612 if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1)
3613 return error ? XFS_ERROR(error) : 0;
3614
3615 if (current_fsuid(cr) != ip->i_d.di_uid) {
3616 mode >>= 3;
3617 if (!in_group_p((gid_t)ip->i_d.di_gid))
3618 mode >>= 3;
3619 }
3620
3621 /*
3622 * If the DACs are ok we don't need any capability check.
3623 */
3624 if ((ip->i_d.di_mode & mode) == mode)
3625 return 0;
3626 /*
3627 * Read/write DACs are always overridable.
3628 * Executable DACs are overridable if at least one exec bit is set.
3629 */
3630 if (!(orgmode & S_IXUSR) ||
3631 (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
3632 if (capable_cred(cr, CAP_DAC_OVERRIDE))
3633 return 0;
3634
3635 if ((orgmode == S_IRUSR) ||
3636 (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) {
3637 if (capable_cred(cr, CAP_DAC_READ_SEARCH))
3638 return 0;
3639#ifdef NOISE
3640 cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode);
3641#endif /* NOISE */
3642 return XFS_ERROR(EACCES);
3643 }
3644 return XFS_ERROR(EACCES);
3645}
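/*
 * A minimal sketch (hypothetical helper) of the mode shifting above: the
 * requested rwx bits start in the owner position and are shifted into the
 * group, then other, positions before being compared against di_mode.
 */
#if 0
static mode_t
sketch_mode_for_caller(mode_t requested, int is_owner, int in_group)
{
	if (!is_owner) {
		requested >>= 3;		/* owner bits -> group bits */
		if (!in_group)
			requested >>= 3;	/* group bits -> other bits */
	}
	/* the DAC check then tests (di_mode & requested) == requested */
	return requested;
}
#endif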
3646
3647#ifdef XFS_ILOCK_TRACE
3648ktrace_t *xfs_ilock_trace_buf;
3649
3650void
3651xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3652{
3653 ktrace_enter(ip->i_lock_trace,
3654 (void *)ip,
3655 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3656 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3657 (void *)ra, /* caller of ilock */
3658 (void *)(unsigned long)current_cpu(),
3659 (void *)(unsigned long)current_pid(),
3660 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3661}
3662#endif
3663
3664/*
3665 * Return a pointer to the extent record at file index idx.
3666 */
 3667xfs_bmbt_rec_host_t *
3668xfs_iext_get_ext(
3669 xfs_ifork_t *ifp, /* inode fork pointer */
3670 xfs_extnum_t idx) /* index of target extent */
3671{
3672 ASSERT(idx >= 0);
3673 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3674 return ifp->if_u1.if_ext_irec->er_extbuf;
3675 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3676 xfs_ext_irec_t *erp; /* irec pointer */
3677 int erp_idx = 0; /* irec index */
3678 xfs_extnum_t page_idx = idx; /* ext index in target list */
3679
3680 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3681 return &erp->er_extbuf[page_idx];
3682 } else if (ifp->if_bytes) {
3683 return &ifp->if_u1.if_extents[idx];
3684 } else {
3685 return NULL;
3686 }
3687}
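/*
 * A userspace-flavoured sketch (hypothetical types and helper) of the
 * translation xfs_iext_idx_to_irec() performs for the indirect case above:
 * find the extent page whose [er_extoff, er_extoff + er_extcount) range
 * covers a file-based index, and convert it to a page-relative index.
 * The real code binary searches; a linear walk shows the invariant.
 */
#if 0
struct sketch_irec { int er_extoff; int er_extcount; };

static int
sketch_idx_to_page(struct sketch_irec *tbl, int nlists, int idx, int *page_idx)
{
	int i;

	for (i = 0; i < nlists; i++) {
		if (idx < tbl[i].er_extoff + tbl[i].er_extcount) {
			*page_idx = idx - tbl[i].er_extoff;
			return i;	/* irec index of target list */
		}
	}
	return -1;			/* idx is past the last extent */
}
#endif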
3688
3689/*
3690 * Insert new item(s) into the extent records for incore inode
3691 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3692 */
3693void
3694xfs_iext_insert(
3695 xfs_ifork_t *ifp, /* inode fork pointer */
3696 xfs_extnum_t idx, /* starting index of new items */
3697 xfs_extnum_t count, /* number of inserted items */
3698 xfs_bmbt_irec_t *new) /* items to insert */
3699{
3700 xfs_extnum_t i; /* extent record index */
3701
3702 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3703 xfs_iext_add(ifp, idx, count);
 3704	for (i = idx; i < idx + count; i++, new++)
 3705		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3706}
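/*
 * A minimal usage sketch (hypothetical helper and values): make room for
 * one record at index 0 of a fork whose extents are already incore, then
 * let xfs_iext_insert() copy the new record in.
 */
#if 0
static void
sketch_insert_one(xfs_ifork_t *ifp)
{
	xfs_bmbt_irec_t	irec = {
		.br_startoff	= 0,	/* file offset, in blocks */
		.br_startblock	= 100,	/* on-disk start block */
		.br_blockcount	= 8,	/* length, in blocks */
		.br_state	= XFS_EXT_NORM,
	};

	xfs_iext_insert(ifp, 0, 1, &irec);
}
#endif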
3707
3708/*
3709 * This is called when the amount of space required for incore file
3710 * extents needs to be increased. The ext_diff parameter stores the
3711 * number of new extents being added and the idx parameter contains
3712 * the extent index where the new extents will be added. If the new
3713 * extents are being appended, then we just need to (re)allocate and
3714 * initialize the space. Otherwise, if the new extents are being
3715 * inserted into the middle of the existing entries, a bit more work
3716 * is required to make room for the new extents to be inserted. The
3717 * caller is responsible for filling in the new extent entries upon
3718 * return.
3719 */
3720void
3721xfs_iext_add(
3722 xfs_ifork_t *ifp, /* inode fork pointer */
3723 xfs_extnum_t idx, /* index to begin adding exts */
 3724	int		ext_diff)	/* number of extents to add */
3725{
3726 int byte_diff; /* new bytes being added */
3727 int new_size; /* size of extents after adding */
3728 xfs_extnum_t nextents; /* number of extents in file */
3729
3730 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3731 ASSERT((idx >= 0) && (idx <= nextents));
3732 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3733 new_size = ifp->if_bytes + byte_diff;
3734 /*
3735 * If the new number of extents (nextents + ext_diff)
3736 * fits inside the inode, then continue to use the inline
3737 * extent buffer.
3738 */
3739 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3740 if (idx < nextents) {
3741 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3742 &ifp->if_u2.if_inline_ext[idx],
3743 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3744 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3745 }
3746 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3747 ifp->if_real_bytes = 0;
 3748		ifp->if_lastex = nextents + ext_diff;
3749 }
3750 /*
3751 * Otherwise use a linear (direct) extent list.
3752 * If the extents are currently inside the inode,
3753 * xfs_iext_realloc_direct will switch us from
3754 * inline to direct extent allocation mode.
3755 */
 3756	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3757 xfs_iext_realloc_direct(ifp, new_size);
3758 if (idx < nextents) {
3759 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3760 &ifp->if_u1.if_extents[idx],
3761 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3762 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3763 }
3764 }
3765 /* Indirection array */
3766 else {
3767 xfs_ext_irec_t *erp;
3768 int erp_idx = 0;
3769 int page_idx = idx;
3770
3771 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3772 if (ifp->if_flags & XFS_IFEXTIREC) {
3773 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3774 } else {
3775 xfs_iext_irec_init(ifp);
3776 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3777 erp = ifp->if_u1.if_ext_irec;
3778 }
3779 /* Extents fit in target extent page */
3780 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3781 if (page_idx < erp->er_extcount) {
3782 memmove(&erp->er_extbuf[page_idx + ext_diff],
3783 &erp->er_extbuf[page_idx],
3784 (erp->er_extcount - page_idx) *
3785 sizeof(xfs_bmbt_rec_t));
3786 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3787 }
3788 erp->er_extcount += ext_diff;
3789 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3790 }
3791 /* Insert a new extent page */
3792 else if (erp) {
3793 xfs_iext_add_indirect_multi(ifp,
3794 erp_idx, page_idx, ext_diff);
3795 }
3796 /*
3797 * If extent(s) are being appended to the last page in
3798 * the indirection array and the new extent(s) don't fit
3799 * in the page, then erp is NULL and erp_idx is set to
3800 * the next index needed in the indirection array.
3801 */
3802 else {
3803 int count = ext_diff;
3804
3805 while (count) {
3806 erp = xfs_iext_irec_new(ifp, erp_idx);
3807 erp->er_extcount = count;
3808 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3809 if (count) {
3810 erp_idx++;
3811 }
3812 }
3813 }
3814 }
3815 ifp->if_bytes = new_size;
3816}
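/*
 * A minimal sketch (hypothetical helper) of the three storage tiers chosen
 * above: inline in the inode up to XFS_INLINE_EXTS records, a single
 * reallocated array up to XFS_LINEAR_EXTS records, and the indirection
 * array beyond that.
 */
#if 0
static const char *
sketch_storage_mode(xfs_extnum_t nextents)
{
	if (nextents <= XFS_INLINE_EXTS)
		return "inline";	/* if_u2.if_inline_ext */
	if (nextents <= XFS_LINEAR_EXTS)
		return "direct";	/* if_u1.if_extents */
	return "indirect";		/* if_u1.if_ext_irec */
}
#endif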
3817
3818/*
3819 * This is called when incore extents are being added to the indirection
3820 * array and the new extents do not fit in the target extent list. The
3821 * erp_idx parameter contains the irec index for the target extent list
3822 * in the indirection array, and the idx parameter contains the extent
3823 * index within the list. The number of extents being added is stored
3824 * in the count parameter.
3825 *
3826 * |-------| |-------|
3827 * | | | | idx - number of extents before idx
3828 * | idx | | count |
3829 * | | | | count - number of extents being inserted at idx
3830 * |-------| |-------|
3831 * | count | | nex2 | nex2 - number of extents after idx + count
3832 * |-------| |-------|
3833 */
3834void
3835xfs_iext_add_indirect_multi(
3836 xfs_ifork_t *ifp, /* inode fork pointer */
3837 int erp_idx, /* target extent irec index */
3838 xfs_extnum_t idx, /* index within target list */
3839 int count) /* new extents being added */
3840{
3841 int byte_diff; /* new bytes being added */
3842 xfs_ext_irec_t *erp; /* pointer to irec entry */
3843 xfs_extnum_t ext_diff; /* number of extents to add */
3844 xfs_extnum_t ext_cnt; /* new extents still needed */
3845 xfs_extnum_t nex2; /* extents after idx + count */
3846 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3847 int nlists; /* number of irec's (lists) */
3848
3849 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3850 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3851 nex2 = erp->er_extcount - idx;
3852 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3853
3854 /*
 3855	 * Save second part of target extent list
 3856	 * (all extents at and after idx). */
3857 if (nex2) {
3858 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3859 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3860 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3861 erp->er_extcount -= nex2;
3862 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3863 memset(&erp->er_extbuf[idx], 0, byte_diff);
3864 }
3865
3866 /*
3867 * Add the new extents to the end of the target
3868 * list, then allocate new irec record(s) and
3869 * extent buffer(s) as needed to store the rest
3870 * of the new extents.
3871 */
3872 ext_cnt = count;
3873 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3874 if (ext_diff) {
3875 erp->er_extcount += ext_diff;
3876 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3877 ext_cnt -= ext_diff;
3878 }
3879 while (ext_cnt) {
3880 erp_idx++;
3881 erp = xfs_iext_irec_new(ifp, erp_idx);
3882 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3883 erp->er_extcount = ext_diff;
3884 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3885 ext_cnt -= ext_diff;
3886 }
3887
3888 /* Add nex2 extents back to indirection array */
3889 if (nex2) {
3890 xfs_extnum_t ext_avail;
3891 int i;
3892
3893 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3894 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3895 i = 0;
3896 /*
3897 * If nex2 extents fit in the current page, append
3898 * nex2_ep after the new extents.
3899 */
3900 if (nex2 <= ext_avail) {
3901 i = erp->er_extcount;
3902 }
3903 /*
3904 * Otherwise, check if space is available in the
3905 * next page.
3906 */
3907 else if ((erp_idx < nlists - 1) &&
3908 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3909 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3910 erp_idx++;
3911 erp++;
3912 /* Create a hole for nex2 extents */
3913 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3914 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3915 }
3916 /*
3917 * Final choice, create a new extent page for
3918 * nex2 extents.
3919 */
3920 else {
3921 erp_idx++;
3922 erp = xfs_iext_irec_new(ifp, erp_idx);
3923 }
3924 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3925 kmem_free(nex2_ep, byte_diff);
3926 erp->er_extcount += nex2;
3927 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3928 }
3929}
3930
3931/*
3932 * This is called when the amount of space required for incore file
3933 * extents needs to be decreased. The ext_diff parameter stores the
3934 * number of extents to be removed and the idx parameter contains
3935 * the extent index where the extents will be removed from.
3936 *
3937 * If the amount of space needed has decreased below the linear
3938 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3939 * extent array. Otherwise, use kmem_realloc() to adjust the
3940 * size to what is needed.
3941 */
3942void
3943xfs_iext_remove(
3944 xfs_ifork_t *ifp, /* inode fork pointer */
3945 xfs_extnum_t idx, /* index to begin removing exts */
3946 int ext_diff) /* number of extents to remove */
3947{
3948 xfs_extnum_t nextents; /* number of extents in file */
3949 int new_size; /* size of extents after removal */
3950
3951 ASSERT(ext_diff > 0);
3952 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3953 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3954
3955 if (new_size == 0) {
3956 xfs_iext_destroy(ifp);
 3957	} else if (ifp->if_flags & XFS_IFEXTIREC) {
 3958		xfs_iext_remove_indirect(ifp, idx, ext_diff);
3959 } else if (ifp->if_real_bytes) {
3960 xfs_iext_remove_direct(ifp, idx, ext_diff);
3961 } else {
3962 xfs_iext_remove_inline(ifp, idx, ext_diff);
3963 }
3964 ifp->if_bytes = new_size;
3965}
3966
3967/*
3968 * This removes ext_diff extents from the inline buffer, beginning
3969 * at extent index idx.
3970 */
3971void
3972xfs_iext_remove_inline(
3973 xfs_ifork_t *ifp, /* inode fork pointer */
3974 xfs_extnum_t idx, /* index to begin removing exts */
3975 int ext_diff) /* number of extents to remove */
3976{
3977 int nextents; /* number of extents in file */
3978
 3979	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3980 ASSERT(idx < XFS_INLINE_EXTS);
3981 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3982 ASSERT(((nextents - ext_diff) > 0) &&
3983 (nextents - ext_diff) < XFS_INLINE_EXTS);
3984
3985 if (idx + ext_diff < nextents) {
3986 memmove(&ifp->if_u2.if_inline_ext[idx],
3987 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3988 (nextents - (idx + ext_diff)) *
3989 sizeof(xfs_bmbt_rec_t));
3990 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3991 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3992 } else {
3993 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3994 ext_diff * sizeof(xfs_bmbt_rec_t));
3995 }
3996}
3997
3998/*
3999 * This removes ext_diff extents from a linear (direct) extent list,
4000 * beginning at extent index idx. If the extents are being removed
4001 * from the end of the list (ie. truncate) then we just need to re-
4002 * allocate the list to remove the extra space. Otherwise, if the
4003 * extents are being removed from the middle of the existing extent
4004 * entries, then we first need to move the extent records beginning
4005 * at idx + ext_diff up in the list to overwrite the records being
4006 * removed, then remove the extra space via kmem_realloc.
4007 */
4008void
4009xfs_iext_remove_direct(
4010 xfs_ifork_t *ifp, /* inode fork pointer */
4011 xfs_extnum_t idx, /* index to begin removing exts */
4012 int ext_diff) /* number of extents to remove */
4013{
4014 xfs_extnum_t nextents; /* number of extents in file */
4015 int new_size; /* size of extents after removal */
4016
 4017	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4018 new_size = ifp->if_bytes -
4019 (ext_diff * sizeof(xfs_bmbt_rec_t));
4020 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4021
4022 if (new_size == 0) {
4023 xfs_iext_destroy(ifp);
4024 return;
4025 }
4026 /* Move extents up in the list (if needed) */
4027 if (idx + ext_diff < nextents) {
4028 memmove(&ifp->if_u1.if_extents[idx],
4029 &ifp->if_u1.if_extents[idx + ext_diff],
4030 (nextents - (idx + ext_diff)) *
4031 sizeof(xfs_bmbt_rec_t));
4032 }
4033 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
4034 0, ext_diff * sizeof(xfs_bmbt_rec_t));
4035 /*
4036 * Reallocate the direct extent list. If the extents
4037 * will fit inside the inode then xfs_iext_realloc_direct
4038 * will switch from direct to inline extent allocation
4039 * mode for us.
4040 */
4041 xfs_iext_realloc_direct(ifp, new_size);
4042 ifp->if_bytes = new_size;
4043}
4044
4045/*
4046 * This is called when incore extents are being removed from the
4047 * indirection array and the extents being removed span multiple extent
4048 * buffers. The idx parameter contains the file extent index where we
4049 * want to begin removing extents, and the count parameter contains
4050 * how many extents need to be removed.
4051 *
4052 * |-------| |-------|
4053 * | nex1 | | | nex1 - number of extents before idx
4054 * |-------| | count |
4055 * | | | | count - number of extents being removed at idx
4056 * | count | |-------|
4057 * | | | nex2 | nex2 - number of extents after idx + count
4058 * |-------| |-------|
4059 */
4060void
4061xfs_iext_remove_indirect(
4062 xfs_ifork_t *ifp, /* inode fork pointer */
4063 xfs_extnum_t idx, /* index to begin removing extents */
4064 int count) /* number of extents to remove */
4065{
4066 xfs_ext_irec_t *erp; /* indirection array pointer */
4067 int erp_idx = 0; /* indirection array index */
4068 xfs_extnum_t ext_cnt; /* extents left to remove */
4069 xfs_extnum_t ext_diff; /* extents to remove in current list */
4070 xfs_extnum_t nex1; /* number of extents before idx */
4071 xfs_extnum_t nex2; /* extents after idx + count */
 4072	int		nlists;		/* entries in indirection array */
4073 int page_idx = idx; /* index in target extent list */
4074
4075 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4076 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
4077 ASSERT(erp != NULL);
4078 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4079 nex1 = page_idx;
4080 ext_cnt = count;
4081 while (ext_cnt) {
4082 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
4083 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
4084 /*
4085 * Check for deletion of entire list;
4086 * xfs_iext_irec_remove() updates extent offsets.
4087 */
4088 if (ext_diff == erp->er_extcount) {
4089 xfs_iext_irec_remove(ifp, erp_idx);
4090 ext_cnt -= ext_diff;
4091 nex1 = 0;
4092 if (ext_cnt) {
4093 ASSERT(erp_idx < ifp->if_real_bytes /
4094 XFS_IEXT_BUFSZ);
4095 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4096 nex1 = 0;
4097 continue;
4098 } else {
4099 break;
4100 }
4101 }
4102 /* Move extents up (if needed) */
4103 if (nex2) {
4104 memmove(&erp->er_extbuf[nex1],
4105 &erp->er_extbuf[nex1 + ext_diff],
4106 nex2 * sizeof(xfs_bmbt_rec_t));
4107 }
4108 /* Zero out rest of page */
4109 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4110 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4111 /* Update remaining counters */
4112 erp->er_extcount -= ext_diff;
4113 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4114 ext_cnt -= ext_diff;
4115 nex1 = 0;
4116 erp_idx++;
4117 erp++;
4118 }
4119 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4120 xfs_iext_irec_compact(ifp);
4121}
4122
4123/*
4124 * Create, destroy, or resize a linear (direct) block of extents.
4125 */
4126void
4127xfs_iext_realloc_direct(
4128 xfs_ifork_t *ifp, /* inode fork pointer */
4129 int new_size) /* new size of extents */
4130{
4131 int rnew_size; /* real new size of extents */
4132
4133 rnew_size = new_size;
4134
 4135	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
 4136	       ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
 4137		(new_size != ifp->if_real_bytes)));
 4138
4139 /* Free extent records */
4140 if (new_size == 0) {
4141 xfs_iext_destroy(ifp);
4142 }
4143 /* Resize direct extent list and zero any new bytes */
4144 else if (ifp->if_real_bytes) {
4145 /* Check if extents will fit inside the inode */
4146 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4147 xfs_iext_direct_to_inline(ifp, new_size /
4148 (uint)sizeof(xfs_bmbt_rec_t));
4149 ifp->if_bytes = new_size;
4150 return;
4151 }
 4152		if (!is_power_of_2(new_size)) {
 4153			rnew_size = roundup_pow_of_two(new_size);
 4154		}
 4155		if (rnew_size != ifp->if_real_bytes) {
 4156			ifp->if_u1.if_extents =
4157 kmem_realloc(ifp->if_u1.if_extents,
4158 rnew_size,
4159 ifp->if_real_bytes,
4160 KM_SLEEP);
4161 }
4162 if (rnew_size > ifp->if_real_bytes) {
4163 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4164 (uint)sizeof(xfs_bmbt_rec_t)], 0,
4165 rnew_size - ifp->if_real_bytes);
4166 }
4167 }
4168 /*
4169 * Switch from the inline extent buffer to a direct
4170 * extent list. Be sure to include the inline extent
4171 * bytes in new_size.
4172 */
4173 else {
4174 new_size += ifp->if_bytes;
 4175		if (!is_power_of_2(new_size)) {
 4176			rnew_size = roundup_pow_of_two(new_size);
4177 }
4178 xfs_iext_inline_to_direct(ifp, rnew_size);
4179 }
4180 ifp->if_real_bytes = rnew_size;
4181 ifp->if_bytes = new_size;
4182}
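/*
 * A minimal sketch (hypothetical helper) of the growth policy above: sizes
 * are rounded up to the next power of two, so repeated single-extent
 * additions trigger only O(log n) reallocations. This mirrors what
 * roundup_pow_of_two() computes.
 */
#if 0
static int
sketch_round_size(int new_size)
{
	int rnew_size = 1;

	while (rnew_size < new_size)
		rnew_size <<= 1;
	return rnew_size;	/* e.g. 3000 bytes -> 4096 bytes */
}
#endif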
4183
4184/*
4185 * Switch from linear (direct) extent records to inline buffer.
4186 */
4187void
4188xfs_iext_direct_to_inline(
4189 xfs_ifork_t *ifp, /* inode fork pointer */
4190 xfs_extnum_t nextents) /* number of extents in file */
4191{
4192 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4193 ASSERT(nextents <= XFS_INLINE_EXTS);
4194 /*
4195 * The inline buffer was zeroed when we switched
4196 * from inline to direct extent allocation mode,
4197 * so we don't need to clear it here.
4198 */
4199 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4200 nextents * sizeof(xfs_bmbt_rec_t));
 4201	kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4202 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4203 ifp->if_real_bytes = 0;
4204}
4205
4206/*
4207 * Switch from inline buffer to linear (direct) extent records.
4208 * new_size should already be rounded up to the next power of 2
4209 * by the caller (when appropriate), so use new_size as it is.
4210 * However, since new_size may be rounded up, we can't update
4211 * if_bytes here. It is the caller's responsibility to update
4212 * if_bytes upon return.
4213 */
4214void
4215xfs_iext_inline_to_direct(
4216 xfs_ifork_t *ifp, /* inode fork pointer */
4217 int new_size) /* number of extents in file */
4218{
 4219	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP);
4220 memset(ifp->if_u1.if_extents, 0, new_size);
4221 if (ifp->if_bytes) {
4222 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4223 ifp->if_bytes);
4224 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4225 sizeof(xfs_bmbt_rec_t));
4226 }
4227 ifp->if_real_bytes = new_size;
4228}
4229
4230/*
4231 * Resize an extent indirection array to new_size bytes.
4232 */
4233void
4234xfs_iext_realloc_indirect(
4235 xfs_ifork_t *ifp, /* inode fork pointer */
4236 int new_size) /* new indirection array size */
4237{
4238 int nlists; /* number of irec's (ex lists) */
4239 int size; /* current indirection array size */
4240
4241 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4242 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4243 size = nlists * sizeof(xfs_ext_irec_t);
4244 ASSERT(ifp->if_real_bytes);
4245 ASSERT((new_size >= 0) && (new_size != size));
4246 if (new_size == 0) {
4247 xfs_iext_destroy(ifp);
4248 } else {
4249 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4250 kmem_realloc(ifp->if_u1.if_ext_irec,
4251 new_size, size, KM_SLEEP);
4252 }
4253}
4254
4255/*
4256 * Switch from indirection array to linear (direct) extent allocations.
4257 */
4258void
4259xfs_iext_indirect_to_direct(
4260 xfs_ifork_t *ifp) /* inode fork pointer */
4261{
 4262	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
4263 xfs_extnum_t nextents; /* number of extents in file */
4264 int size; /* size of file extents */
4265
4266 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4267 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4268 ASSERT(nextents <= XFS_LINEAR_EXTS);
4269 size = nextents * sizeof(xfs_bmbt_rec_t);
4270
4271 xfs_iext_irec_compact_full(ifp);
4272 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4273
4274 ep = ifp->if_u1.if_ext_irec->er_extbuf;
4275 kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
4276 ifp->if_flags &= ~XFS_IFEXTIREC;
4277 ifp->if_u1.if_extents = ep;
4278 ifp->if_bytes = size;
4279 if (nextents < XFS_LINEAR_EXTS) {
4280 xfs_iext_realloc_direct(ifp, size);
4281 }
4282}
4283
4284/*
4285 * Free incore file extents.
4286 */
4287void
4288xfs_iext_destroy(
4289 xfs_ifork_t *ifp) /* inode fork pointer */
4290{
4291 if (ifp->if_flags & XFS_IFEXTIREC) {
4292 int erp_idx;
4293 int nlists;
4294
4295 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4296 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4297 xfs_iext_irec_remove(ifp, erp_idx);
4298 }
4299 ifp->if_flags &= ~XFS_IFEXTIREC;
4300 } else if (ifp->if_real_bytes) {
4301 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4302 } else if (ifp->if_bytes) {
4303 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4304 sizeof(xfs_bmbt_rec_t));
4305 }
4306 ifp->if_u1.if_extents = NULL;
4307 ifp->if_real_bytes = 0;
4308 ifp->if_bytes = 0;
4309}
 4310
4311/*
4312 * Return a pointer to the extent record for file system block bno.
4313 */
 4314xfs_bmbt_rec_host_t *			/* pointer to found extent record */
4315xfs_iext_bno_to_ext(
4316 xfs_ifork_t *ifp, /* inode fork pointer */
4317 xfs_fileoff_t bno, /* block number to search for */
4318 xfs_extnum_t *idxp) /* index of target extent */
4319{
 4320	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
 4321	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
 4322	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
 4323	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
 4324	int		high;		/* upper boundary in search */
 4325	xfs_extnum_t	idx = 0;	/* index of target extent */
 4326	int		low;		/* lower boundary in search */
4327 xfs_extnum_t nextents; /* number of file extents */
4328 xfs_fileoff_t startoff = 0; /* start offset of extent */
4329
4330 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4331 if (nextents == 0) {
4332 *idxp = 0;
4333 return NULL;
4334 }
4335 low = 0;
4336 if (ifp->if_flags & XFS_IFEXTIREC) {
4337 /* Find target extent list */
4338 int erp_idx = 0;
4339 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
4340 base = erp->er_extbuf;
4341 high = erp->er_extcount - 1;
4342 } else {
4343 base = ifp->if_u1.if_extents;
4344 high = nextents - 1;
4345 }
4346 /* Binary search extent records */
4347 while (low <= high) {
4348 idx = (low + high) >> 1;
4349 ep = base + idx;
4350 startoff = xfs_bmbt_get_startoff(ep);
4351 blockcount = xfs_bmbt_get_blockcount(ep);
4352 if (bno < startoff) {
4353 high = idx - 1;
4354 } else if (bno >= startoff + blockcount) {
4355 low = idx + 1;
4356 } else {
4357 /* Convert back to file-based extent index */
4358 if (ifp->if_flags & XFS_IFEXTIREC) {
4359 idx += erp->er_extoff;
4360 }
4361 *idxp = idx;
4362 return ep;
4363 }
4364 }
4365 /* Convert back to file-based extent index */
4366 if (ifp->if_flags & XFS_IFEXTIREC) {
4367 idx += erp->er_extoff;
4368 }
4369 if (bno >= startoff + blockcount) {
4370 if (++idx == nextents) {
4371 ep = NULL;
4372 } else {
4373 ep = xfs_iext_get_ext(ifp, idx);
4374 }
4375 }
4376 *idxp = idx;
4377 return ep;
4378}
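/*
 * A minimal usage sketch (hypothetical helper): test whether file block
 * 'bno' is mapped. A NULL return from xfs_iext_bno_to_ext() means bno lies
 * beyond the last extent; otherwise the returned record either covers bno
 * or is the next extent after it when bno falls in a hole, so the range
 * must be rechecked.
 */
#if 0
static int
sketch_block_is_mapped(xfs_ifork_t *ifp, xfs_fileoff_t bno)
{
	xfs_extnum_t		idx;
	xfs_bmbt_rec_host_t	*ep;

	ep = xfs_iext_bno_to_ext(ifp, bno, &idx);
	if (!ep)
		return 0;
	return bno >= xfs_bmbt_get_startoff(ep) &&
	       bno <  xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
}
#endif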
4379
4380/*
4381 * Return a pointer to the indirection array entry containing the
4382 * extent record for filesystem block bno. Store the index of the
4383 * target irec in *erp_idxp.
4384 */
 4385xfs_ext_irec_t *			/* pointer to found extent record */
4386xfs_iext_bno_to_irec(
4387 xfs_ifork_t *ifp, /* inode fork pointer */
4388 xfs_fileoff_t bno, /* block number to search for */
4389 int *erp_idxp) /* irec index of target ext list */
4390{
4391 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4392 xfs_ext_irec_t *erp_next; /* next indirection array entry */
 4393	int		erp_idx;	/* indirection array index */
4394 int nlists; /* number of extent irec's (lists) */
4395 int high; /* binary search upper limit */
4396 int low; /* binary search lower limit */
4397
4398 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4399 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4400 erp_idx = 0;
4401 low = 0;
4402 high = nlists - 1;
4403 while (low <= high) {
4404 erp_idx = (low + high) >> 1;
4405 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4406 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
4407 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
4408 high = erp_idx - 1;
4409 } else if (erp_next && bno >=
4410 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
4411 low = erp_idx + 1;
4412 } else {
4413 break;
4414 }
4415 }
4416 *erp_idxp = erp_idx;
4417 return erp;
4418}
4419
4420/*
4421 * Return a pointer to the indirection array entry containing the
4422 * extent record at file extent index *idxp. Store the index of the
4423 * target irec in *erp_idxp and store the page index of the target
4424 * extent record in *idxp.
4425 */
4426xfs_ext_irec_t *
4427xfs_iext_idx_to_irec(
4428 xfs_ifork_t *ifp, /* inode fork pointer */
4429 xfs_extnum_t *idxp, /* extent index (file -> page) */
4430 int *erp_idxp, /* pointer to target irec */
4431 int realloc) /* new bytes were just added */
4432{
4433 xfs_ext_irec_t *prev; /* pointer to previous irec */
4434 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
4435 int erp_idx; /* indirection array index */
4436 int nlists; /* number of irec's (ex lists) */
4437 int high; /* binary search upper limit */
4438 int low; /* binary search lower limit */
4439 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
4440
4441 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4442 ASSERT(page_idx >= 0 && page_idx <=
4443 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4444 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4445 erp_idx = 0;
4446 low = 0;
4447 high = nlists - 1;
4448
4449 /* Binary search extent irec's */
4450 while (low <= high) {
4451 erp_idx = (low + high) >> 1;
4452 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4453 prev = erp_idx > 0 ? erp - 1 : NULL;
4454 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4455 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4456 high = erp_idx - 1;
4457 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
4458 (page_idx == erp->er_extoff + erp->er_extcount &&
4459 !realloc)) {
4460 low = erp_idx + 1;
4461 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
4462 erp->er_extcount == XFS_LINEAR_EXTS) {
4463 ASSERT(realloc);
4464 page_idx = 0;
4465 erp_idx++;
4466 erp = erp_idx < nlists ? erp + 1 : NULL;
4467 break;
4468 } else {
4469 page_idx -= erp->er_extoff;
4470 break;
4471 }
4472 }
4473 *idxp = page_idx;
4474 *erp_idxp = erp_idx;
 4475	return erp;
4476}
4477
4478/*
4479 * Allocate and initialize an indirection array once the space needed
4480 * for incore extents increases above XFS_IEXT_BUFSZ.
4481 */
4482void
4483xfs_iext_irec_init(
4484 xfs_ifork_t *ifp) /* inode fork pointer */
4485{
4486 xfs_ext_irec_t *erp; /* indirection array pointer */
4487 xfs_extnum_t nextents; /* number of extents in file */
4488
4489 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4490 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4491 ASSERT(nextents <= XFS_LINEAR_EXTS);
4492
4493 erp = (xfs_ext_irec_t *)
4494 kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
4495
4496 if (nextents == 0) {
 4497		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4498 } else if (!ifp->if_real_bytes) {
4499 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4500 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
4501 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
4502 }
4503 erp->er_extbuf = ifp->if_u1.if_extents;
4504 erp->er_extcount = nextents;
4505 erp->er_extoff = 0;
4506
4507 ifp->if_flags |= XFS_IFEXTIREC;
4508 ifp->if_real_bytes = XFS_IEXT_BUFSZ;
4509 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
4510 ifp->if_u1.if_ext_irec = erp;
4511
4512 return;
4513}
4514
4515/*
4516 * Allocate and initialize a new entry in the indirection array.
4517 */
4518xfs_ext_irec_t *
4519xfs_iext_irec_new(
4520 xfs_ifork_t *ifp, /* inode fork pointer */
4521 int erp_idx) /* index for new irec */
4522{
4523 xfs_ext_irec_t *erp; /* indirection array pointer */
4524 int i; /* loop counter */
4525 int nlists; /* number of irec's (ex lists) */
4526
4527 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4528 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4529
4530 /* Resize indirection array */
4531 xfs_iext_realloc_indirect(ifp, ++nlists *
4532 sizeof(xfs_ext_irec_t));
4533 /*
4534 * Move records down in the array so the
4535 * new page can use erp_idx.
4536 */
4537 erp = ifp->if_u1.if_ext_irec;
4538 for (i = nlists - 1; i > erp_idx; i--) {
4539 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
4540 }
4541 ASSERT(i == erp_idx);
4542
4543 /* Initialize new extent record */
4544 erp = ifp->if_u1.if_ext_irec;
 4545	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4546 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4547 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4548 erp[erp_idx].er_extcount = 0;
4549 erp[erp_idx].er_extoff = erp_idx > 0 ?
4550 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
 4551	return &erp[erp_idx];
4552}
4553
4554/*
4555 * Remove a record from the indirection array.
4556 */
4557void
4558xfs_iext_irec_remove(
4559 xfs_ifork_t *ifp, /* inode fork pointer */
4560 int erp_idx) /* irec index to remove */
4561{
4562 xfs_ext_irec_t *erp; /* indirection array pointer */
4563 int i; /* loop counter */
4564 int nlists; /* number of irec's (ex lists) */
4565
4566 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4567 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4568 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4569 if (erp->er_extbuf) {
4570 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
4571 -erp->er_extcount);
4572 kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
4573 }
4574 /* Compact extent records */
4575 erp = ifp->if_u1.if_ext_irec;
4576 for (i = erp_idx; i < nlists - 1; i++) {
4577 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
4578 }
4579 /*
4580 * Manually free the last extent record from the indirection
4581 * array. A call to xfs_iext_realloc_indirect() with a size
4582 * of zero would result in a call to xfs_iext_destroy() which
4583 * would in turn call this function again, creating a nasty
4584 * infinite loop.
4585 */
4586 if (--nlists) {
4587 xfs_iext_realloc_indirect(ifp,
4588 nlists * sizeof(xfs_ext_irec_t));
4589 } else {
4590 kmem_free(ifp->if_u1.if_ext_irec,
4591 sizeof(xfs_ext_irec_t));
4592 }
4593 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4594}
4595
4596/*
4597 * This is called to clean up large amounts of unused memory allocated
4598 * by the indirection array. Before compacting anything though, verify
4599 * that the indirection array is still needed and switch back to the
4600 * linear extent list (or even the inline buffer) if possible. The
4601 * compaction policy is as follows:
4602 *
4603 * Full Compaction: Extents fit into a single page (or inline buffer)
 4604 * Full Compaction: Extents occupy less than 1/8 of allocated space
 4605 * Partial Compaction: Extents occupy between 1/8 and 1/2 of allocated space
 4606 * No Compaction: Extents occupy at least 1/2 of allocated space
4607 */
4608void
4609xfs_iext_irec_compact(
4610 xfs_ifork_t *ifp) /* inode fork pointer */
4611{
4612 xfs_extnum_t nextents; /* number of extents in file */
4613 int nlists; /* number of irec's (ex lists) */
4614
4615 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4616 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4617 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4618
4619 if (nextents == 0) {
4620 xfs_iext_destroy(ifp);
4621 } else if (nextents <= XFS_INLINE_EXTS) {
4622 xfs_iext_indirect_to_direct(ifp);
4623 xfs_iext_direct_to_inline(ifp, nextents);
4624 } else if (nextents <= XFS_LINEAR_EXTS) {
4625 xfs_iext_indirect_to_direct(ifp);
4626 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
4627 xfs_iext_irec_compact_full(ifp);
4628 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
4629 xfs_iext_irec_compact_pages(ifp);
4630 }
4631}
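/*
 * Worked example of the thresholds above (illustrative): with nlists pages
 * the capacity is nlists * XFS_LINEAR_EXTS records. For, say, 8 allocated
 * pages, fewer than capacity/8 live extents triggers a full compaction,
 * fewer than capacity/2 compacts neighbouring pages, and anything at least
 * half full is left alone.
 */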
4632
4633/*
4634 * Combine extents from neighboring extent pages.
4635 */
4636void
4637xfs_iext_irec_compact_pages(
4638 xfs_ifork_t *ifp) /* inode fork pointer */
4639{
4640 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */
4641 int erp_idx = 0; /* indirection array index */
4642 int nlists; /* number of irec's (ex lists) */
4643
4644 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4645 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4646 while (erp_idx < nlists - 1) {
4647 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4648 erp_next = erp + 1;
4649 if (erp_next->er_extcount <=
4650 (XFS_LINEAR_EXTS - erp->er_extcount)) {
4651 memmove(&erp->er_extbuf[erp->er_extcount],
4652 erp_next->er_extbuf, erp_next->er_extcount *
4653 sizeof(xfs_bmbt_rec_t));
4654 erp->er_extcount += erp_next->er_extcount;
4655 /*
4656 * Free page before removing extent record
4657 * so er_extoffs don't get modified in
4658 * xfs_iext_irec_remove.
4659 */
4660 kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
4661 erp_next->er_extbuf = NULL;
4662 xfs_iext_irec_remove(ifp, erp_idx + 1);
4663 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4664 } else {
4665 erp_idx++;
4666 }
4667 }
4668}
4669
4670/*
4671 * Fully compact the extent records managed by the indirection array.
4672 */
4673void
4674xfs_iext_irec_compact_full(
4675 xfs_ifork_t *ifp) /* inode fork pointer */
4676{
 4677	xfs_bmbt_rec_host_t *ep, *ep_next;	/* extent record pointers */
4678 xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */
4679 int erp_idx = 0; /* extent irec index */
4680 int ext_avail; /* empty entries in ex list */
4681 int ext_diff; /* number of exts to add */
4682 int nlists; /* number of irec's (ex lists) */
4683
4684 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4685 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4686 erp = ifp->if_u1.if_ext_irec;
4687 ep = &erp->er_extbuf[erp->er_extcount];
4688 erp_next = erp + 1;
4689 ep_next = erp_next->er_extbuf;
4690 while (erp_idx < nlists - 1) {
4691 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
4692 ext_diff = MIN(ext_avail, erp_next->er_extcount);
4693 memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
4694 erp->er_extcount += ext_diff;
4695 erp_next->er_extcount -= ext_diff;
4696 /* Remove next page */
4697 if (erp_next->er_extcount == 0) {
4698 /*
4699 * Free page before removing extent record
4700 * so er_extoffs don't get modified in
4701 * xfs_iext_irec_remove.
4702 */
 4703			/* er_extbuf pages are XFS_IEXT_BUFSZ bytes */
 4704			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
4705 erp_next->er_extbuf = NULL;
4706 xfs_iext_irec_remove(ifp, erp_idx + 1);
4707 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4708 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4709 /* Update next page */
4710 } else {
4711 /* Move rest of page up to become next new page */
4712 memmove(erp_next->er_extbuf, ep_next,
4713 erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
4714 ep_next = erp_next->er_extbuf;
4715 memset(&ep_next[erp_next->er_extcount], 0,
4716 (XFS_LINEAR_EXTS - erp_next->er_extcount) *
4717 sizeof(xfs_bmbt_rec_t));
4718 }
4719 if (erp->er_extcount == XFS_LINEAR_EXTS) {
4720 erp_idx++;
4721 if (erp_idx < nlists)
4722 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4723 else
4724 break;
4725 }
4726 ep = &erp->er_extbuf[erp->er_extcount];
4727 erp_next = erp + 1;
4728 ep_next = erp_next->er_extbuf;
4729 }
4730}
4731
4732/*
4733 * This is called to update the er_extoff field in the indirection
4734 * array when extents have been added or removed from one of the
4735 * extent lists. erp_idx contains the irec index to begin updating
4736 * at and ext_diff contains the number of extents that were added
4737 * or removed.
4738 */
4739void
4740xfs_iext_irec_update_extoffs(
4741 xfs_ifork_t *ifp, /* inode fork pointer */
4742 int erp_idx, /* irec index to update */
4743 int ext_diff) /* number of new extents */
4744{
4745 int i; /* loop counter */
4746 int nlists; /* number of irec's (ex lists */
4747
4748 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4749 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4750 for (i = erp_idx; i < nlists; i++) {
4751 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4752 }
4753}
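/*
 * Illustration of the invariant maintained above: each er_extoff equals
 * the sum of er_extcount over all earlier lists. For example, page extent
 * counts {5, 3, 7} give offsets {0, 5, 8}; removing 2 extents from the
 * first page shifts the later offsets to {0, 3, 6}.
 */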