/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
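/*
 * Worked example (illustrative values, not taken from a real filesystem):
 * a 48-bit physical block number pb = 0x123489abcdef is stored as
 * ee_start_lo = 0x89abcdef (low 32 bits) and ee_start_hi = 0x1234 (high
 * 16 bits); ext_pblock() reassembles it as (0x1234 << 32) | 0x89abcdef.
 * The shift is written as "<< 31) << 1" rather than "<< 32" so the
 * expression stays well defined even in configurations where
 * ext4_fsblk_t is only 32 bits wide, since a shift by the full width of
 * a type is undefined behaviour in C.
 */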
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	/*
	 * We have dropped i_data_sem so someone might have cached again
	 * an extent we are going to truncate.
	 */
	ext4_ext_invalidate_cache(inode);

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}

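/*
 * Example of the colour heuristic above (hypothetical numbers): with
 * 32768 blocks per group, a process with pid 4711 (4711 % 16 == 7)
 * gets colour = 7 * (32768 / 16) = 14336, so its allocation goal lands
 * 14336 blocks into the chosen group. Spreading concurrent writers
 * across sixteen slices of the group this way reduces contention for
 * the same free-space region.
 */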
/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}

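/*
 * Capacity example (assuming the usual on-disk layout: a 12-byte
 * ext4_extent_header followed by 12-byte ext4_extent/ext4_extent_idx
 * entries, and a 60-byte i_data area in the inode): with a 4 KiB block
 * size, a full leaf or index block holds (4096 - 12) / 12 = 340
 * entries, while the root in the inode body holds (60 - 12) / 12 = 4
 * entries. The AGGRESSIVE_TEST caps shrink these numbers so deep trees
 * can be exercised on small files.
 */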
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
{
	int lcap, icap, rcap, leafs, idxs, num;
	int newextents = blocks;

	rcap = ext4_ext_space_root_idx(inode, 0);
	lcap = ext4_ext_space_block(inode, 0);
	icap = ext4_ext_space_block_idx(inode, 0);

	/* number of new leaf blocks needed */
	num = leafs = (newextents + lcap - 1) / lcap;

	/*
	 * Worst case, we need separate index block(s)
	 * to link all new leaf blocks.
	 */
	idxs = (leafs + icap - 1) / icap;
	do {
		num += idxs;
		idxs = (idxs + icap - 1) / icap;
	} while (idxs > rcap);

	return num;
}

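/*
 * Worked example (made-up request size; 4 KiB blocks, so lcap = icap =
 * 340 and rcap = 4 as computed above): allocating 1000 blocks assumes
 * the worst case of 1000 single-block extents. Those need
 * ceil(1000 / 340) = 3 leaf blocks; ceil(3 / 340) = 1 index block
 * links them, and since 1 <= rcap the remaining index entries fit in
 * the root, so the loop stops with num = 3 + 1 = 4 metadata blocks.
 */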
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header/extent in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

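/*
 * Illustration of the search above (example leaf, not real data): for
 * extents starting at logical blocks 0, 100 and 200, a lookup for
 * block 150 leaves l pointing at the entry for 200, so p_ext = l - 1
 * selects the entry for 100, the rightmost extent whose ee_block does
 * not exceed the target. Starting l at EXT_FIRST_EXTENT(eh) + 1
 * guarantees l - 1 never underflows the array, so a non-empty leaf
 * always yields a valid p_ext even when the target precedes every
 * entry.
 */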
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

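/*
 * Usage sketch (hypothetical caller, error handling elided): the array
 * returned has one ext4_ext_path element per tree level; for a depth-1
 * tree, path[0] holds the index entry chosen in the root and
 * path[ext_depth(inode)].p_ext points at the leaf extent covering the
 * block (or is NULL when the leaf is empty). Callers release the path
 * with ext4_ext_drop_refs() and, if they passed path == NULL in,
 * kfree() the array:
 *
 *	path = ext4_ext_find_extent(inode, lblk, NULL);
 *	if (!IS_ERR(path)) {
 *		ex = path[ext_depth(inode)].p_ext;
 *		ext4_ext_drop_refs(path);
 *		kfree(path);
 *	}
 */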
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

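/*
 * Tracing the memmove arithmetic above (illustrative): len starts as
 * the number of index slots from curp->p_idx up to the last possible
 * slot in the block, EXT_MAX_INDEX(). For an "insert after", the
 * (len - 1) * sizeof(struct ext4_extent_idx) bytes starting at
 * p_idx + 1 shift one slot right, opening a hole at p_idx + 1; an
 * "insert before" shifts len slots starting at p_idx itself. Clamping
 * len at 0 makes the move a no-op when nothing lies to the right of
 * the insertion point.
 */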
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing
	 * and mark the filesystem read-only. The index won't
	 * be inserted and the tree will be in a consistent
	 * state. The next mount will repair buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free those blocks
	 * on failure.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1, 1);
		}
	}
	kfree(ablocks);

	return err;
}

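/*
 * Split example (hypothetical depth-2 tree, split at at = 1): with
 * depth - at = 1, a single new block is allocated: a fresh leaf that
 * receives every extent to the right of the split point, after which
 * the new index [border -> leaf block] is inserted into the existing
 * index block at depth 1 by ext4_ext_insert_index(). Had at been 0,
 * depth - at = 2 blocks would be allocated and the "create
 * intermediate indexes" loop above would also build one intermediate
 * index block linking the new leaf.
 */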
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree looking for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
	return 0;
}

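/*
 * Example (invented layout): if the extent found in the path covers
 * logical blocks [200, 209] at physical block 8000 and the caller
 * passes *logical = 215, the closest allocated block to the left is
 * the last block of that extent, so the function reports
 * *logical = 200 + 10 - 1 = 209 and *phys = 8000 + 10 - 1 = 8009.
 */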
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

1384 | /* | |
d0d856e8 RD |
1385 | * ext4_ext_correct_indexes: |
1386 | * if leaf gets modified and modified extent is first in the leaf, | |
1387 | * then we have to correct all indexes above. | |
a86c6181 AT |
1388 | * TODO: do we need to correct tree in all cases? |
1389 | */ | |
1d03ec98 | 1390 | static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, |
a86c6181 AT |
1391 | struct ext4_ext_path *path) |
1392 | { | |
1393 | struct ext4_extent_header *eh; | |
1394 | int depth = ext_depth(inode); | |
1395 | struct ext4_extent *ex; | |
1396 | __le32 border; | |
1397 | int k, err = 0; | |
1398 | ||
1399 | eh = path[depth].p_hdr; | |
1400 | ex = path[depth].p_ext; | |
1401 | BUG_ON(ex == NULL); | |
1402 | BUG_ON(eh == NULL); | |
1403 | ||
1404 | if (depth == 0) { | |
1405 | /* there is no tree at all */ | |
1406 | return 0; | |
1407 | } | |
1408 | ||
1409 | if (ex != EXT_FIRST_EXTENT(eh)) { | |
1410 | /* we correct tree if first leaf got modified only */ | |
1411 | return 0; | |
1412 | } | |
1413 | ||
1414 | /* | |
d0d856e8 | 1415 | * TODO: we need correction if border is smaller than current one |
a86c6181 AT |
1416 | */ |
1417 | k = depth - 1; | |
1418 | border = path[depth].p_ext->ee_block; | |
7e028976 AM |
1419 | err = ext4_ext_get_access(handle, inode, path + k); |
1420 | if (err) | |
a86c6181 AT |
1421 | return err; |
1422 | path[k].p_idx->ei_block = border; | |
7e028976 AM |
1423 | err = ext4_ext_dirty(handle, inode, path + k); |
1424 | if (err) | |
a86c6181 AT |
1425 | return err; |
1426 | ||
1427 | while (k--) { | |
1428 | /* change all left-side indexes */ | |
1429 | if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) | |
1430 | break; | |
7e028976 AM |
1431 | err = ext4_ext_get_access(handle, inode, path + k); |
1432 | if (err) | |
a86c6181 AT |
1433 | break; |
1434 | path[k].p_idx->ei_block = border; | |
7e028976 AM |
1435 | err = ext4_ext_dirty(handle, inode, path + k); |
1436 | if (err) | |
a86c6181 AT |
1437 | break; |
1438 | } | |
1439 | ||
1440 | return err; | |
1441 | } | |
1442 | ||
int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}

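/*
 * Merge example (invented extents): ex1 covering logical blocks
 * [100, 149] at physical 5000 and ex2 covering [150, 199] at physical
 * 5050 are mergeable: both are initialized, 100 + 50 == 150 logically
 * and 5000 + 50 == 5050 physically, and the combined length of 100
 * blocks stays within max_len. If ex2 instead started at physical
 * 6000, the extents would be logically adjacent but not physically
 * contiguous, and the function would return 0.
 */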
56055d3a AA |
1485 | /* |
1486 | * This function tries to merge the "ex" extent to the next extent in the tree. | |
1487 | * It always tries to merge towards right. If you want to merge towards | |
1488 | * left, pass "ex - 1" as argument instead of "ex". | |
1489 | * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns | |
1490 | * 1 if they got merged. | |
1491 | */ | |
1492 | int ext4_ext_try_to_merge(struct inode *inode, | |
1493 | struct ext4_ext_path *path, | |
1494 | struct ext4_extent *ex) | |
1495 | { | |
1496 | struct ext4_extent_header *eh; | |
1497 | unsigned int depth, len; | |
1498 | int merge_done = 0; | |
1499 | int uninitialized = 0; | |
1500 | ||
1501 | depth = ext_depth(inode); | |
1502 | BUG_ON(path[depth].p_hdr == NULL); | |
1503 | eh = path[depth].p_hdr; | |
1504 | ||
1505 | while (ex < EXT_LAST_EXTENT(eh)) { | |
1506 | if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) | |
1507 | break; | |
1508 | /* merge with next extent! */ | |
1509 | if (ext4_ext_is_uninitialized(ex)) | |
1510 | uninitialized = 1; | |
1511 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) | |
1512 | + ext4_ext_get_actual_len(ex + 1)); | |
1513 | if (uninitialized) | |
1514 | ext4_ext_mark_uninitialized(ex); | |
1515 | ||
1516 | if (ex + 1 < EXT_LAST_EXTENT(eh)) { | |
1517 | len = (EXT_LAST_EXTENT(eh) - ex - 1) | |
1518 | * sizeof(struct ext4_extent); | |
1519 | memmove(ex + 1, ex + 2, len); | |
1520 | } | |
e8546d06 | 1521 | le16_add_cpu(&eh->eh_entries, -1); |
56055d3a AA |
1522 | merge_done = 1; |
1523 | WARN_ON(eh->eh_entries == 0); | |
1524 | if (!eh->eh_entries) | |
1525 | ext4_error(inode->i_sb, "ext4_ext_try_to_merge", | |
1526 | "inode#%lu, eh->eh_entries = 0!", inode->i_ino); | |
1527 | } | |
1528 | ||
1529 | return merge_done; | |
1530 | } | |
1531 | ||
25d14f98 AA |
1532 | /* |
1533 | * check if a portion of the "newext" extent overlaps with an | |
1534 | * existing extent. | |
1535 | * | |
1536 | * If there is an overlap discovered, it updates the length of the newext | |
1537 | * such that there will be no overlap, and then returns 1. | |
1538 | * If there is no overlap found, it returns 0. | |
1539 | */ | |
1540 | unsigned int ext4_ext_check_overlap(struct inode *inode, | |
1541 | struct ext4_extent *newext, | |
1542 | struct ext4_ext_path *path) | |
1543 | { | |
725d26d3 | 1544 | ext4_lblk_t b1, b2; |
25d14f98 AA |
1545 | unsigned int depth, len1; |
1546 | unsigned int ret = 0; | |
1547 | ||
1548 | b1 = le32_to_cpu(newext->ee_block); | |
a2df2a63 | 1549 | len1 = ext4_ext_get_actual_len(newext); |
25d14f98 AA |
1550 | depth = ext_depth(inode); |
1551 | if (!path[depth].p_ext) | |
1552 | goto out; | |
1553 | b2 = le32_to_cpu(path[depth].p_ext->ee_block); | |
1554 | ||
1555 | /* | |
1556 | * get the next allocated block if the extent in the path | |
2b2d6d01 | 1557 | * is before the requested block(s) |
25d14f98 AA |
1558 | */ |
1559 | if (b2 < b1) { | |
1560 | b2 = ext4_ext_next_allocated_block(path); | |
1561 | if (b2 == EXT_MAX_BLOCK) | |
1562 | goto out; | |
1563 | } | |
1564 | ||
725d26d3 | 1565 | /* check for wrap through zero on extent logical start block*/ |
25d14f98 AA |
1566 | if (b1 + len1 < b1) { |
1567 | len1 = EXT_MAX_BLOCK - b1; | |
1568 | newext->ee_len = cpu_to_le16(len1); | |
1569 | ret = 1; | |
1570 | } | |
1571 | ||
1572 | /* check for overlap */ | |
1573 | if (b1 + len1 > b2) { | |
1574 | newext->ee_len = cpu_to_le16(b2 - b1); | |
1575 | ret = 1; | |
1576 | } | |
1577 | out: | |
1578 | return ret; | |
1579 | } | |
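
/*
 * Worked example (hypothetical numbers): newext starts at b1 = 100 with
 * len1 = 20, and the next allocated extent starts at b2 = 110.  Since
 * b1 + len1 = 120 > b2, newext is trimmed to b2 - b1 = 10 blocks
 * (covering 100..109) and the function returns 1.
 */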
1580 | ||
a86c6181 | 1581 | /* |
d0d856e8 RD |
1582 | * ext4_ext_insert_extent: |
1583 | * tries to merge the requested extent into an existing extent or |
1584 | * inserts the requested extent as a new one into the tree, |
1585 | * creating a new leaf in the no-space case. |
a86c6181 AT |
1586 | */ |
1587 | int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | |
1588 | struct ext4_ext_path *path, | |
0031462b | 1589 | struct ext4_extent *newext, int flag) |
a86c6181 | 1590 | { |
af5bc92d | 1591 | struct ext4_extent_header *eh; |
a86c6181 AT |
1592 | struct ext4_extent *ex, *fex; |
1593 | struct ext4_extent *nearex; /* nearest extent */ | |
1594 | struct ext4_ext_path *npath = NULL; | |
725d26d3 AK |
1595 | int depth, len, err; |
1596 | ext4_lblk_t next; | |
a2df2a63 | 1597 | unsigned uninitialized = 0; |
a86c6181 | 1598 | |
a2df2a63 | 1599 | BUG_ON(ext4_ext_get_actual_len(newext) == 0); |
a86c6181 AT |
1600 | depth = ext_depth(inode); |
1601 | ex = path[depth].p_ext; | |
1602 | BUG_ON(path[depth].p_hdr == NULL); | |
1603 | ||
1604 | /* try to insert block into found extent and return */ | |
0031462b MC |
1605 | if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) |
1606 | && ext4_can_extents_be_merged(inode, ex, newext)) { | |
553f9008 M |
1607 | ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", |
1608 | ext4_ext_is_uninitialized(newext), | |
a2df2a63 | 1609 | ext4_ext_get_actual_len(newext), |
a86c6181 | 1610 | le32_to_cpu(ex->ee_block), |
553f9008 | 1611 | ext4_ext_is_uninitialized(ex), |
a2df2a63 | 1612 | ext4_ext_get_actual_len(ex), ext_pblock(ex)); |
7e028976 AM |
1613 | err = ext4_ext_get_access(handle, inode, path + depth); |
1614 | if (err) | |
a86c6181 | 1615 | return err; |
a2df2a63 AA |
1616 | |
1617 | /* | |
1618 | * ext4_can_extents_be_merged should have checked that either | |
1619 | * both extents are uninitialized, or both aren't. Thus we | |
1620 | * need to check only one of them here. | |
1621 | */ | |
1622 | if (ext4_ext_is_uninitialized(ex)) | |
1623 | uninitialized = 1; | |
1624 | ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) | |
1625 | + ext4_ext_get_actual_len(newext)); | |
1626 | if (uninitialized) | |
1627 | ext4_ext_mark_uninitialized(ex); | |
a86c6181 AT |
1628 | eh = path[depth].p_hdr; |
1629 | nearex = ex; | |
1630 | goto merge; | |
1631 | } | |
1632 | ||
1633 | repeat: | |
1634 | depth = ext_depth(inode); | |
1635 | eh = path[depth].p_hdr; | |
1636 | if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) | |
1637 | goto has_space; | |
1638 | ||
1639 | /* probably next leaf has space for us? */ | |
1640 | fex = EXT_LAST_EXTENT(eh); | |
1641 | next = ext4_ext_next_leaf_block(inode, path); | |
1642 | if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block) | |
1643 | && next != EXT_MAX_BLOCK) { | |
1644 | ext_debug("next leaf block - %d\n", next); | |
1645 | BUG_ON(npath != NULL); | |
1646 | npath = ext4_ext_find_extent(inode, next, NULL); | |
1647 | if (IS_ERR(npath)) | |
1648 | return PTR_ERR(npath); | |
1649 | BUG_ON(npath->p_depth != path->p_depth); | |
1650 | eh = npath[depth].p_hdr; | |
1651 | if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { | |
1652 | ext_debug("next leaf isnt full(%d)\n", | |
1653 | le16_to_cpu(eh->eh_entries)); | |
1654 | path = npath; | |
1655 | goto repeat; | |
1656 | } | |
1657 | ext_debug("next leaf has no free space(%d,%d)\n", | |
1658 | le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); | |
1659 | } | |
1660 | ||
1661 | /* | |
d0d856e8 RD |
1662 | * There is no free space in the found leaf. |
1663 | * We're going to add a new leaf to the tree. |
a86c6181 AT |
1664 | */ |
1665 | err = ext4_ext_create_new_leaf(handle, inode, path, newext); | |
1666 | if (err) | |
1667 | goto cleanup; | |
1668 | depth = ext_depth(inode); | |
1669 | eh = path[depth].p_hdr; | |
1670 | ||
1671 | has_space: | |
1672 | nearex = path[depth].p_ext; | |
1673 | ||
7e028976 AM |
1674 | err = ext4_ext_get_access(handle, inode, path + depth); |
1675 | if (err) | |
a86c6181 AT |
1676 | goto cleanup; |
1677 | ||
1678 | if (!nearex) { | |
1679 | /* there is no extent in this leaf, create first one */ | |
553f9008 | 1680 | ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n", |
8c55e204 DK |
1681 | le32_to_cpu(newext->ee_block), |
1682 | ext_pblock(newext), | |
553f9008 | 1683 | ext4_ext_is_uninitialized(newext), |
a2df2a63 | 1684 | ext4_ext_get_actual_len(newext)); |
a86c6181 AT |
1685 | path[depth].p_ext = EXT_FIRST_EXTENT(eh); |
1686 | } else if (le32_to_cpu(newext->ee_block) | |
8c55e204 | 1687 | > le32_to_cpu(nearex->ee_block)) { |
a86c6181 AT |
1688 | /* BUG_ON(newext->ee_block == nearex->ee_block); */ |
1689 | if (nearex != EXT_LAST_EXTENT(eh)) { | |
1690 | len = EXT_MAX_EXTENT(eh) - nearex; | |
1691 | len = (len - 1) * sizeof(struct ext4_extent); | |
1692 | len = len < 0 ? 0 : len; | |
553f9008 | 1693 | ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, " |
a86c6181 | 1694 | "move %d from 0x%p to 0x%p\n", |
8c55e204 DK |
1695 | le32_to_cpu(newext->ee_block), |
1696 | ext_pblock(newext), | |
553f9008 | 1697 | ext4_ext_is_uninitialized(newext), |
a2df2a63 | 1698 | ext4_ext_get_actual_len(newext), |
a86c6181 AT |
1699 | nearex, len, nearex + 1, nearex + 2); |
1700 | memmove(nearex + 2, nearex + 1, len); | |
1701 | } | |
1702 | path[depth].p_ext = nearex + 1; | |
1703 | } else { | |
1704 | BUG_ON(newext->ee_block == nearex->ee_block); | |
1705 | len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent); | |
1706 | len = len < 0 ? 0 : len; | |
553f9008 | 1707 | ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, " |
a86c6181 AT |
1708 | "move %d from 0x%p to 0x%p\n", |
1709 | le32_to_cpu(newext->ee_block), | |
f65e6fba | 1710 | ext_pblock(newext), |
553f9008 | 1711 | ext4_ext_is_uninitialized(newext), |
a2df2a63 | 1712 | ext4_ext_get_actual_len(newext), |
a86c6181 AT |
1713 | nearex, len, nearex + 1, nearex + 2); |
1714 | memmove(nearex + 1, nearex, len); | |
1715 | path[depth].p_ext = nearex; | |
1716 | } | |
1717 | ||
e8546d06 | 1718 | le16_add_cpu(&eh->eh_entries, 1); |
a86c6181 AT |
1719 | nearex = path[depth].p_ext; |
1720 | nearex->ee_block = newext->ee_block; | |
b377611d | 1721 | ext4_ext_store_pblock(nearex, ext_pblock(newext)); |
a86c6181 | 1722 | nearex->ee_len = newext->ee_len; |
a86c6181 AT |
1723 | |
1724 | merge: | |
1725 | /* try to merge extents to the right */ | |
0031462b MC |
1726 | if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) |
1727 | ext4_ext_try_to_merge(inode, path, nearex); | |
a86c6181 AT |
1728 | |
1729 | /* try to merge extents to the left */ | |
1730 | ||
1731 | /* time to correct all indexes above */ | |
1732 | err = ext4_ext_correct_indexes(handle, inode, path); | |
1733 | if (err) | |
1734 | goto cleanup; | |
1735 | ||
1736 | err = ext4_ext_dirty(handle, inode, path + depth); | |
1737 | ||
1738 | cleanup: | |
1739 | if (npath) { | |
1740 | ext4_ext_drop_refs(npath); | |
1741 | kfree(npath); | |
1742 | } | |
a86c6181 AT |
1743 | ext4_ext_invalidate_cache(inode); |
1744 | return err; | |
1745 | } | |
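
/*
 * Caller sketch (hypothetical helper, error handling elided): insert a
 * freshly allocated, initialized extent of "len" blocks at logical
 * block "lblk", mapped to physical block "pblk".  "path" must have been
 * obtained with ext4_ext_find_extent() for "lblk", and "len" must fit
 * in a single extent.  This mirrors what the allocation path in
 * ext4_ext_get_blocks() does with the extent it builds.
 */
static int ext4_ext_insert_simple(handle_t *handle, struct inode *inode,
				  struct ext4_ext_path *path,
				  ext4_lblk_t lblk, ext4_fsblk_t pblk,
				  unsigned int len)
{
	struct ext4_extent newex;

	newex.ee_block = cpu_to_le32(lblk);
	ext4_ext_store_pblock(&newex, pblk);
	newex.ee_len = cpu_to_le16(len);	/* initialized extent */
	return ext4_ext_insert_extent(handle, inode, path, &newex, 0);
}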
1746 | ||
6873fa0d ES |
1747 | int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, |
1748 | ext4_lblk_t num, ext_prepare_callback func, | |
1749 | void *cbdata) | |
1750 | { | |
1751 | struct ext4_ext_path *path = NULL; | |
1752 | struct ext4_ext_cache cbex; | |
1753 | struct ext4_extent *ex; | |
1754 | ext4_lblk_t next, start = 0, end = 0; | |
1755 | ext4_lblk_t last = block + num; | |
1756 | int depth, exists, err = 0; | |
1757 | ||
1758 | BUG_ON(func == NULL); | |
1759 | BUG_ON(inode == NULL); | |
1760 | ||
1761 | while (block < last && block != EXT_MAX_BLOCK) { | |
1762 | num = last - block; | |
1763 | /* find extent for this block */ | |
1764 | path = ext4_ext_find_extent(inode, block, path); | |
1765 | if (IS_ERR(path)) { | |
1766 | err = PTR_ERR(path); | |
1767 | path = NULL; | |
1768 | break; | |
1769 | } | |
1770 | ||
1771 | depth = ext_depth(inode); | |
1772 | BUG_ON(path[depth].p_hdr == NULL); | |
1773 | ex = path[depth].p_ext; | |
1774 | next = ext4_ext_next_allocated_block(path); | |
1775 | ||
1776 | exists = 0; | |
1777 | if (!ex) { | |
1778 | /* there is no extent yet, so try to allocate | |
1779 | * all requested space */ | |
1780 | start = block; | |
1781 | end = block + num; | |
1782 | } else if (le32_to_cpu(ex->ee_block) > block) { | |
1783 | /* need to allocate space before found extent */ | |
1784 | start = block; | |
1785 | end = le32_to_cpu(ex->ee_block); | |
1786 | if (block + num < end) | |
1787 | end = block + num; | |
1788 | } else if (block >= le32_to_cpu(ex->ee_block) | |
1789 | + ext4_ext_get_actual_len(ex)) { | |
1790 | /* need to allocate space after found extent */ | |
1791 | start = block; | |
1792 | end = block + num; | |
1793 | if (end >= next) | |
1794 | end = next; | |
1795 | } else if (block >= le32_to_cpu(ex->ee_block)) { | |
1796 | /* | |
1797 | * some part of requested space is covered | |
1798 | * by found extent | |
1799 | */ | |
1800 | start = block; | |
1801 | end = le32_to_cpu(ex->ee_block) | |
1802 | + ext4_ext_get_actual_len(ex); | |
1803 | if (block + num < end) | |
1804 | end = block + num; | |
1805 | exists = 1; | |
1806 | } else { | |
1807 | BUG(); | |
1808 | } | |
1809 | BUG_ON(end <= start); | |
1810 | ||
1811 | if (!exists) { | |
1812 | cbex.ec_block = start; | |
1813 | cbex.ec_len = end - start; | |
1814 | cbex.ec_start = 0; | |
1815 | cbex.ec_type = EXT4_EXT_CACHE_GAP; | |
1816 | } else { | |
1817 | cbex.ec_block = le32_to_cpu(ex->ee_block); | |
1818 | cbex.ec_len = ext4_ext_get_actual_len(ex); | |
1819 | cbex.ec_start = ext_pblock(ex); | |
1820 | cbex.ec_type = EXT4_EXT_CACHE_EXTENT; | |
1821 | } | |
1822 | ||
1823 | BUG_ON(cbex.ec_len == 0); | |
1824 | err = func(inode, path, &cbex, ex, cbdata); | |
1825 | ext4_ext_drop_refs(path); | |
1826 | ||
1827 | if (err < 0) | |
1828 | break; | |
1829 | ||
1830 | if (err == EXT_REPEAT) | |
1831 | continue; | |
1832 | else if (err == EXT_BREAK) { | |
1833 | err = 0; | |
1834 | break; | |
1835 | } | |
1836 | ||
1837 | if (ext_depth(inode) != depth) { | |
1838 | /* depth was changed. we have to realloc path */ | |
1839 | kfree(path); | |
1840 | path = NULL; | |
1841 | } | |
1842 | ||
1843 | block = cbex.ec_block + cbex.ec_len; | |
1844 | } | |
1845 | ||
1846 | if (path) { | |
1847 | ext4_ext_drop_refs(path); | |
1848 | kfree(path); | |
1849 | } | |
1850 | ||
1851 | return err; | |
1852 | } | |
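
/*
 * Callback sketch (hypothetical; the real in-tree user of this walker
 * is the fiemap path): count how many allocated extents cover a range.
 * Usage:
 *	ext4_ext_walk_space(inode, 0, EXT_MAX_BLOCK,
 *			    ext4_ext_count_mapped_cb, &count);
 */
static int ext4_ext_count_mapped_cb(struct inode *inode,
				    struct ext4_ext_path *path,
				    struct ext4_ext_cache *cbex,
				    struct ext4_extent *ex, void *cbdata)
{
	unsigned int *count = cbdata;

	if (cbex->ec_type == EXT4_EXT_CACHE_EXTENT)
		(*count)++;		/* a mapped region */
	return EXT_CONTINUE;		/* keep walking; gaps are skipped */
}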
1853 | ||
09b88252 | 1854 | static void |
725d26d3 | 1855 | ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, |
dd54567a | 1856 | __u32 len, ext4_fsblk_t start, int type) |
a86c6181 AT |
1857 | { |
1858 | struct ext4_ext_cache *cex; | |
1859 | BUG_ON(len == 0); | |
2ec0ae3a | 1860 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
a86c6181 AT |
1861 | cex = &EXT4_I(inode)->i_cached_extent; |
1862 | cex->ec_type = type; | |
1863 | cex->ec_block = block; | |
1864 | cex->ec_len = len; | |
1865 | cex->ec_start = start; | |
2ec0ae3a | 1866 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
a86c6181 AT |
1867 | } |
1868 | ||
1869 | /* | |
d0d856e8 RD |
1870 | * ext4_ext_put_gap_in_cache: |
1871 | * calculate boundaries of the gap that the requested block fits into | |
a86c6181 AT |
1872 | * and cache this gap |
1873 | */ | |
09b88252 | 1874 | static void |
a86c6181 | 1875 | ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, |
725d26d3 | 1876 | ext4_lblk_t block) |
a86c6181 AT |
1877 | { |
1878 | int depth = ext_depth(inode); | |
725d26d3 AK |
1879 | unsigned long len; |
1880 | ext4_lblk_t lblock; | |
a86c6181 AT |
1881 | struct ext4_extent *ex; |
1882 | ||
1883 | ex = path[depth].p_ext; | |
1884 | if (ex == NULL) { | |
1885 | /* there is no extent yet, so gap is [0;-] */ | |
1886 | lblock = 0; | |
1887 | len = EXT_MAX_BLOCK; | |
1888 | ext_debug("cache gap(whole file):"); | |
1889 | } else if (block < le32_to_cpu(ex->ee_block)) { | |
1890 | lblock = block; | |
1891 | len = le32_to_cpu(ex->ee_block) - block; | |
bba90743 ES |
1892 | ext_debug("cache gap(before): %u [%u:%u]", |
1893 | block, | |
1894 | le32_to_cpu(ex->ee_block), | |
1895 | ext4_ext_get_actual_len(ex)); | |
a86c6181 | 1896 | } else if (block >= le32_to_cpu(ex->ee_block) |
a2df2a63 | 1897 | + ext4_ext_get_actual_len(ex)) { |
725d26d3 | 1898 | ext4_lblk_t next; |
8c55e204 | 1899 | lblock = le32_to_cpu(ex->ee_block) |
a2df2a63 | 1900 | + ext4_ext_get_actual_len(ex); |
725d26d3 AK |
1901 | |
1902 | next = ext4_ext_next_allocated_block(path); | |
bba90743 ES |
1903 | ext_debug("cache gap(after): [%u:%u] %u", |
1904 | le32_to_cpu(ex->ee_block), | |
1905 | ext4_ext_get_actual_len(ex), | |
1906 | block); | |
725d26d3 AK |
1907 | BUG_ON(next == lblock); |
1908 | len = next - lblock; | |
a86c6181 AT |
1909 | } else { |
1910 | lblock = len = 0; | |
1911 | BUG(); | |
1912 | } | |
1913 | ||
bba90743 | 1914 | ext_debug(" -> %u:%lu\n", lblock, len); |
a86c6181 AT |
1915 | ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); |
1916 | } | |
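
/*
 * Worked example (hypothetical numbers): extents cover logical blocks
 * 0..9 and 50..59, and block 20 is requested.  The lookup path ends at
 * the 0..9 extent, so lblock = 0 + 10 = 10 and next = 50, and the gap
 * cached is 10:40, i.e. blocks 10..49.
 */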
1917 | ||
09b88252 | 1918 | static int |
725d26d3 | 1919 | ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, |
a86c6181 AT |
1920 | struct ext4_extent *ex) |
1921 | { | |
1922 | struct ext4_ext_cache *cex; | |
2ec0ae3a | 1923 | int ret = EXT4_EXT_CACHE_NO; |
a86c6181 | 1924 | |
2ec0ae3a TT |
1925 | /* |
1926 | * We borrow i_block_reservation_lock to protect i_cached_extent | |
1927 | */ | |
1928 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | |
a86c6181 AT |
1929 | cex = &EXT4_I(inode)->i_cached_extent; |
1930 | ||
1931 | /* has cache valid data? */ | |
1932 | if (cex->ec_type == EXT4_EXT_CACHE_NO) | |
2ec0ae3a | 1933 | goto errout; |
a86c6181 AT |
1934 | |
1935 | BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && | |
1936 | cex->ec_type != EXT4_EXT_CACHE_EXTENT); | |
1937 | if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) { | |
8c55e204 | 1938 | ex->ee_block = cpu_to_le32(cex->ec_block); |
f65e6fba | 1939 | ext4_ext_store_pblock(ex, cex->ec_start); |
8c55e204 | 1940 | ex->ee_len = cpu_to_le16(cex->ec_len); |
bba90743 ES |
1941 | ext_debug("%u cached by %u:%u:%llu\n", |
1942 | block, | |
1943 | cex->ec_block, cex->ec_len, cex->ec_start); | |
2ec0ae3a | 1944 | ret = cex->ec_type; |
a86c6181 | 1945 | } |
2ec0ae3a TT |
1946 | errout: |
1947 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | |
1948 | return ret; | |
a86c6181 AT |
1949 | } |
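
/*
 * Worked example (hypothetical numbers): the cache holds an extent
 * entry 100:20:5000 (logical block 100, 20 blocks, physical block
 * 5000).  A lookup of block 107 falls inside [100, 120), so "ex" is
 * filled in from the cache and EXT4_EXT_CACHE_EXTENT is returned
 * without walking the tree.
 */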
1950 | ||
1951 | /* | |
d0d856e8 RD |
1952 | * ext4_ext_rm_idx: |
1953 | * removes index from the index block. | |
1954 | * It's used in truncate case only, thus all requests are for | |
1955 | * last index in the block only. | |
a86c6181 | 1956 | */ |
1d03ec98 | 1957 | static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, |
a86c6181 AT |
1958 | struct ext4_ext_path *path) |
1959 | { | |
1960 | struct buffer_head *bh; | |
1961 | int err; | |
f65e6fba | 1962 | ext4_fsblk_t leaf; |
a86c6181 AT |
1963 | |
1964 | /* free index block */ | |
1965 | path--; | |
f65e6fba | 1966 | leaf = idx_pblock(path->p_idx); |
a86c6181 | 1967 | BUG_ON(path->p_hdr->eh_entries == 0); |
7e028976 AM |
1968 | err = ext4_ext_get_access(handle, inode, path); |
1969 | if (err) | |
a86c6181 | 1970 | return err; |
e8546d06 | 1971 | le16_add_cpu(&path->p_hdr->eh_entries, -1); |
7e028976 AM |
1972 | err = ext4_ext_dirty(handle, inode, path); |
1973 | if (err) | |
a86c6181 | 1974 | return err; |
2ae02107 | 1975 | ext_debug("index is empty, remove it, free block %llu\n", leaf); |
a86c6181 AT |
1976 | bh = sb_find_get_block(inode->i_sb, leaf); |
1977 | ext4_forget(handle, 1, inode, bh, leaf); | |
c9de560d | 1978 | ext4_free_blocks(handle, inode, leaf, 1, 1); |
a86c6181 AT |
1979 | return err; |
1980 | } | |
1981 | ||
1982 | /* | |
ee12b630 MC |
1983 | * ext4_ext_calc_credits_for_single_extent: |
1984 | * This routine returns the max. credits needed to insert an extent |
1985 | * into the extent tree. |
1986 | * When passing the actual path, the caller should calculate credits |
1987 | * under i_data_sem. | |
a86c6181 | 1988 | */ |
525f4ed8 | 1989 | int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, |
a86c6181 AT |
1990 | struct ext4_ext_path *path) |
1991 | { | |
a86c6181 | 1992 | if (path) { |
ee12b630 | 1993 | int depth = ext_depth(inode); |
f3bd1f3f | 1994 | int ret = 0; |
ee12b630 | 1995 | |
a86c6181 | 1996 | /* probably there is space in leaf? */ |
a86c6181 | 1997 | if (le16_to_cpu(path[depth].p_hdr->eh_entries) |
ee12b630 | 1998 | < le16_to_cpu(path[depth].p_hdr->eh_max)) { |
a86c6181 | 1999 | |
ee12b630 MC |
2000 | /* |
2001 | * There is some space in the leaf, so there is no |
2002 | * need to account for the leaf block credit. |
2003 | * |
2004 | * Bitmaps, block group descriptor blocks |
2005 | * and other metadata blocks still need to be |
2006 | * accounted for. |
2007 | */ | |
525f4ed8 | 2008 | /* 1 bitmap, 1 block group descriptor */ |
ee12b630 | 2009 | ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); |
5887e98b | 2010 | return ret; |
ee12b630 MC |
2011 | } |
2012 | } | |
a86c6181 | 2013 | |
525f4ed8 | 2014 | return ext4_chunk_trans_blocks(inode, nrblocks); |
ee12b630 | 2015 | } |
a86c6181 | 2016 | |
ee12b630 MC |
2017 | /* |
2018 | * How many index/leaf blocks need to change/allocate to modify nrblocks? | |
2019 | * | |
2020 | * If nrblocks fit in a single extent (chunk flag is 1), then |
2021 | * in the worst case each tree level's index/leaf needs to be changed; |
2022 | * if the tree splits due to inserting a new extent, then the old |
2023 | * index/leaf blocks need to be updated too. |
2024 | * |
2025 | * If the nrblocks are discontiguous, they could cause |
2026 | * the whole tree to split more than once, but this is really rare. |
2027 | */ | |
525f4ed8 | 2028 | int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) |
ee12b630 MC |
2029 | { |
2030 | int index; | |
2031 | int depth = ext_depth(inode); | |
a86c6181 | 2032 | |
ee12b630 MC |
2033 | if (chunk) |
2034 | index = depth * 2; | |
2035 | else | |
2036 | index = depth * 3; | |
a86c6181 | 2037 | |
ee12b630 | 2038 | return index; |
a86c6181 AT |
2039 | } |
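
/*
 * Worked example: for a tree of depth 2, a single contiguous chunk
 * (chunk == 1) is charged 2 * 2 = 4 index/leaf blocks, while
 * discontiguous blocks (chunk == 0) are charged 2 * 3 = 6 to allow
 * for more than one split.
 */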
2040 | ||
2041 | static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | |
2042 | struct ext4_extent *ex, | |
725d26d3 | 2043 | ext4_lblk_t from, ext4_lblk_t to) |
a86c6181 AT |
2044 | { |
2045 | struct buffer_head *bh; | |
a2df2a63 | 2046 | unsigned short ee_len = ext4_ext_get_actual_len(ex); |
c9de560d | 2047 | int i, metadata = 0; |
a86c6181 | 2048 | |
c9de560d AT |
2049 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
2050 | metadata = 1; | |
a86c6181 AT |
2051 | #ifdef EXTENTS_STATS |
2052 | { | |
2053 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | |
a86c6181 AT |
2054 | spin_lock(&sbi->s_ext_stats_lock); |
2055 | sbi->s_ext_blocks += ee_len; | |
2056 | sbi->s_ext_extents++; | |
2057 | if (ee_len < sbi->s_ext_min) | |
2058 | sbi->s_ext_min = ee_len; | |
2059 | if (ee_len > sbi->s_ext_max) | |
2060 | sbi->s_ext_max = ee_len; | |
2061 | if (ext_depth(inode) > sbi->s_depth_max) | |
2062 | sbi->s_depth_max = ext_depth(inode); | |
2063 | spin_unlock(&sbi->s_ext_stats_lock); | |
2064 | } | |
2065 | #endif | |
2066 | if (from >= le32_to_cpu(ex->ee_block) | |
a2df2a63 | 2067 | && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { |
a86c6181 | 2068 | /* tail removal */ |
725d26d3 | 2069 | ext4_lblk_t num; |
f65e6fba | 2070 | ext4_fsblk_t start; |
725d26d3 | 2071 | |
a2df2a63 AA |
2072 | num = le32_to_cpu(ex->ee_block) + ee_len - from; |
2073 | start = ext_pblock(ex) + ee_len - num; | |
725d26d3 | 2074 | ext_debug("free last %u blocks starting %llu\n", num, start); |
a86c6181 AT |
2075 | for (i = 0; i < num; i++) { |
2076 | bh = sb_find_get_block(inode->i_sb, start + i); | |
2077 | ext4_forget(handle, 0, inode, bh, start + i); | |
2078 | } | |
c9de560d | 2079 | ext4_free_blocks(handle, inode, start, num, metadata); |
a86c6181 | 2080 | } else if (from == le32_to_cpu(ex->ee_block) |
a2df2a63 | 2081 | && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { |
725d26d3 | 2082 | printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", |
a2df2a63 | 2083 | from, to, le32_to_cpu(ex->ee_block), ee_len); |
a86c6181 | 2084 | } else { |
725d26d3 AK |
2085 | printk(KERN_INFO "strange request: removal(2) " |
2086 | "%u-%u from %u:%u\n", | |
2087 | from, to, le32_to_cpu(ex->ee_block), ee_len); | |
a86c6181 AT |
2088 | } |
2089 | return 0; | |
2090 | } | |
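
/*
 * Worked example (hypothetical numbers) of the tail-removal case above:
 * ex covers logical blocks 100..109 (ee_len = 10) starting at physical
 * block 5000, and from = 105, to = 109.  Then num = 100 + 10 - 105 = 5
 * and start = 5000 + 10 - 5 = 5005, so physical blocks 5005..5009 are
 * forgotten and freed.
 */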
2091 | ||
2092 | static int | |
2093 | ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |
725d26d3 | 2094 | struct ext4_ext_path *path, ext4_lblk_t start) |
a86c6181 AT |
2095 | { |
2096 | int err = 0, correct_index = 0; | |
2097 | int depth = ext_depth(inode), credits; | |
2098 | struct ext4_extent_header *eh; | |
725d26d3 AK |
2099 | ext4_lblk_t a, b, block; |
2100 | unsigned num; | |
2101 | ext4_lblk_t ex_ee_block; | |
a86c6181 | 2102 | unsigned short ex_ee_len; |
a2df2a63 | 2103 | unsigned uninitialized = 0; |
a86c6181 AT |
2104 | struct ext4_extent *ex; |
2105 | ||
c29c0ae7 | 2106 | /* the header must be checked already in ext4_ext_remove_space() */ |
725d26d3 | 2107 | ext_debug("truncate since %u in leaf\n", start); |
a86c6181 AT |
2108 | if (!path[depth].p_hdr) |
2109 | path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); | |
2110 | eh = path[depth].p_hdr; | |
2111 | BUG_ON(eh == NULL); | |
a86c6181 AT |
2112 | |
2113 | /* find where to start removing */ | |
2114 | ex = EXT_LAST_EXTENT(eh); | |
2115 | ||
2116 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
a2df2a63 | 2117 | ex_ee_len = ext4_ext_get_actual_len(ex); |
a86c6181 AT |
2118 | |
2119 | while (ex >= EXT_FIRST_EXTENT(eh) && | |
2120 | ex_ee_block + ex_ee_len > start) { | |
a41f2071 AK |
2121 | |
2122 | if (ext4_ext_is_uninitialized(ex)) | |
2123 | uninitialized = 1; | |
2124 | else | |
2125 | uninitialized = 0; | |
2126 | ||
553f9008 M |
2127 | ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, |
2128 | uninitialized, ex_ee_len); | |
a86c6181 AT |
2129 | path[depth].p_ext = ex; |
2130 | ||
2131 | a = ex_ee_block > start ? ex_ee_block : start; | |
2132 | b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ? | |
2133 | ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK; | |
2134 | ||
2135 | ext_debug(" border %u:%u\n", a, b); | |
2136 | ||
2137 | if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) { | |
2138 | block = 0; | |
2139 | num = 0; | |
2140 | BUG(); | |
2141 | } else if (a != ex_ee_block) { | |
2142 | /* remove tail of the extent */ | |
2143 | block = ex_ee_block; | |
2144 | num = a - block; | |
2145 | } else if (b != ex_ee_block + ex_ee_len - 1) { | |
2146 | /* remove head of the extent */ | |
2147 | block = a; | |
2148 | num = b - a; | |
2149 | /* there is no "make a hole" API yet */ | |
2150 | BUG(); | |
2151 | } else { | |
2152 | /* remove whole extent: excellent! */ | |
2153 | block = ex_ee_block; | |
2154 | num = 0; | |
2155 | BUG_ON(a != ex_ee_block); | |
2156 | BUG_ON(b != ex_ee_block + ex_ee_len - 1); | |
2157 | } | |
2158 | ||
34071da7 TT |
2159 | /* |
2160 | * 3 for leaf, sb, and inode plus 2 (bmap and group | |
2161 | * descriptor) for each block group; assume two block | |
2162 | * groups plus ex_ee_len/blocks_per_block_group for | |
2163 | * the worst case | |
2164 | */ | |
2165 | credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); | |
a86c6181 AT |
2166 | if (ex == EXT_FIRST_EXTENT(eh)) { |
2167 | correct_index = 1; | |
2168 | credits += (ext_depth(inode)) + 1; | |
2169 | } | |
a86c6181 | 2170 | credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); |
a86c6181 | 2171 | |
487caeef | 2172 | err = ext4_ext_truncate_extend_restart(handle, inode, credits); |
9102e4fa | 2173 | if (err) |
a86c6181 | 2174 | goto out; |
a86c6181 AT |
2175 | |
2176 | err = ext4_ext_get_access(handle, inode, path + depth); | |
2177 | if (err) | |
2178 | goto out; | |
2179 | ||
2180 | err = ext4_remove_blocks(handle, inode, ex, a, b); | |
2181 | if (err) | |
2182 | goto out; | |
2183 | ||
2184 | if (num == 0) { | |
d0d856e8 | 2185 | /* this extent is removed; mark slot entirely unused */ |
f65e6fba | 2186 | ext4_ext_store_pblock(ex, 0); |
e8546d06 | 2187 | le16_add_cpu(&eh->eh_entries, -1); |
a86c6181 AT |
2188 | } |
2189 | ||
2190 | ex->ee_block = cpu_to_le32(block); | |
2191 | ex->ee_len = cpu_to_le16(num); | |
749269fa AA |
2192 | /* |
2193 | * Do not mark uninitialized if all the blocks in the | |
2194 | * extent have been removed. | |
2195 | */ | |
2196 | if (uninitialized && num) | |
a2df2a63 | 2197 | ext4_ext_mark_uninitialized(ex); |
a86c6181 AT |
2198 | |
2199 | err = ext4_ext_dirty(handle, inode, path + depth); | |
2200 | if (err) | |
2201 | goto out; | |
2202 | ||
2ae02107 | 2203 | ext_debug("new extent: %u:%u:%llu\n", block, num, |
f65e6fba | 2204 | ext_pblock(ex)); |
a86c6181 AT |
2205 | ex--; |
2206 | ex_ee_block = le32_to_cpu(ex->ee_block); | |
a2df2a63 | 2207 | ex_ee_len = ext4_ext_get_actual_len(ex); |
a86c6181 AT |
2208 | } |
2209 | ||
2210 | if (correct_index && eh->eh_entries) | |
2211 | err = ext4_ext_correct_indexes(handle, inode, path); | |
2212 | ||
2213 | /* if this leaf is free, then we should | |
2214 | * remove it from index block above */ | |
2215 | if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) | |
2216 | err = ext4_ext_rm_idx(handle, inode, path + depth); | |
2217 | ||
2218 | out: | |
2219 | return err; | |
2220 | } | |
2221 | ||
2222 | /* | |
d0d856e8 RD |
2223 | * ext4_ext_more_to_rm: |
2224 | * returns 1 if current index has to be freed (even partial) | |
a86c6181 | 2225 | */ |
09b88252 | 2226 | static int |
a86c6181 AT |
2227 | ext4_ext_more_to_rm(struct ext4_ext_path *path) |
2228 | { | |
2229 | BUG_ON(path->p_idx == NULL); | |
2230 | ||
2231 | if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) | |
2232 | return 0; | |
2233 | ||
2234 | /* | |
d0d856e8 | 2235 | * if truncation on a deeper level happened, it wasn't partial, |
a86c6181 AT |
2236 | * so we have to consider the current index for truncation |
2237 | */ | |
2238 | if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) | |
2239 | return 0; | |
2240 | return 1; | |
2241 | } | |
2242 | ||
1d03ec98 | 2243 | static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) |
a86c6181 AT |
2244 | { |
2245 | struct super_block *sb = inode->i_sb; | |
2246 | int depth = ext_depth(inode); | |
2247 | struct ext4_ext_path *path; | |
2248 | handle_t *handle; | |
2249 | int i = 0, err = 0; | |
2250 | ||
725d26d3 | 2251 | ext_debug("truncate since %u\n", start); |
a86c6181 AT |
2252 | |
2253 | /* probably the first extent we're going to free will be the last in the block */ |
2254 | handle = ext4_journal_start(inode, depth + 1); | |
2255 | if (IS_ERR(handle)) | |
2256 | return PTR_ERR(handle); | |
2257 | ||
2258 | ext4_ext_invalidate_cache(inode); | |
2259 | ||
2260 | /* | |
d0d856e8 RD |
2261 | * We start scanning from right side, freeing all the blocks |
2262 | * after i_size and walking into the tree depth-wise. | |
a86c6181 | 2263 | */ |
216553c4 | 2264 | path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); |
a86c6181 AT |
2265 | if (path == NULL) { |
2266 | ext4_journal_stop(handle); | |
2267 | return -ENOMEM; | |
2268 | } | |
a86c6181 | 2269 | path[0].p_hdr = ext_inode_hdr(inode); |
56b19868 | 2270 | if (ext4_ext_check(inode, path[0].p_hdr, depth)) { |
a86c6181 AT |
2271 | err = -EIO; |
2272 | goto out; | |
2273 | } | |
2274 | path[0].p_depth = depth; | |
2275 | ||
2276 | while (i >= 0 && err == 0) { | |
2277 | if (i == depth) { | |
2278 | /* this is leaf block */ | |
2279 | err = ext4_ext_rm_leaf(handle, inode, path, start); | |
d0d856e8 | 2280 | /* root level has p_bh == NULL, brelse() eats this */ |
a86c6181 AT |
2281 | brelse(path[i].p_bh); |
2282 | path[i].p_bh = NULL; | |
2283 | i--; | |
2284 | continue; | |
2285 | } | |
2286 | ||
2287 | /* this is index block */ | |
2288 | if (!path[i].p_hdr) { | |
2289 | ext_debug("initialize header\n"); | |
2290 | path[i].p_hdr = ext_block_hdr(path[i].p_bh); | |
a86c6181 AT |
2291 | } |
2292 | ||
a86c6181 | 2293 | if (!path[i].p_idx) { |
d0d856e8 | 2294 | /* this level hasn't been touched yet */ |
a86c6181 AT |
2295 | path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); |
2296 | path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; | |
2297 | ext_debug("init index ptr: hdr 0x%p, num %d\n", | |
2298 | path[i].p_hdr, | |
2299 | le16_to_cpu(path[i].p_hdr->eh_entries)); | |
2300 | } else { | |
d0d856e8 | 2301 | /* we were already here, so look at the next index */ |
a86c6181 AT |
2302 | path[i].p_idx--; |
2303 | } | |
2304 | ||
2305 | ext_debug("level %d - index, first 0x%p, cur 0x%p\n", | |
2306 | i, EXT_FIRST_INDEX(path[i].p_hdr), | |
2307 | path[i].p_idx); | |
2308 | if (ext4_ext_more_to_rm(path + i)) { | |
c29c0ae7 | 2309 | struct buffer_head *bh; |
a86c6181 | 2310 | /* go to the next level */ |
2ae02107 | 2311 | ext_debug("move to level %d (block %llu)\n", |
f65e6fba | 2312 | i + 1, idx_pblock(path[i].p_idx)); |
a86c6181 | 2313 | memset(path + i + 1, 0, sizeof(*path)); |
c29c0ae7 AT |
2314 | bh = sb_bread(sb, idx_pblock(path[i].p_idx)); |
2315 | if (!bh) { | |
a86c6181 AT |
2316 | /* should we reset i_size? */ |
2317 | err = -EIO; | |
2318 | break; | |
2319 | } | |
c29c0ae7 AT |
2320 | if (WARN_ON(i + 1 > depth)) { |
2321 | err = -EIO; | |
2322 | break; | |
2323 | } | |
56b19868 | 2324 | if (ext4_ext_check(inode, ext_block_hdr(bh), |
c29c0ae7 AT |
2325 | depth - i - 1)) { |
2326 | err = -EIO; | |
2327 | break; | |
2328 | } | |
2329 | path[i + 1].p_bh = bh; | |
a86c6181 | 2330 | |
d0d856e8 RD |
2331 | /* save actual number of indexes since this |
2332 | * number is changed at the next iteration */ | |
a86c6181 AT |
2333 | path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); |
2334 | i++; | |
2335 | } else { | |
d0d856e8 | 2336 | /* we finished processing this index, go up */ |
a86c6181 | 2337 | if (path[i].p_hdr->eh_entries == 0 && i > 0) { |
d0d856e8 | 2338 | /* index is empty, remove it; |
a86c6181 AT |
2339 | * handle must be already prepared by the |
2340 | * leaf removal code (ext4_ext_rm_leaf()) */ |
2341 | err = ext4_ext_rm_idx(handle, inode, path + i); | |
2342 | } | |
d0d856e8 | 2343 | /* root level has p_bh == NULL, brelse() eats this */ |
a86c6181 AT |
2344 | brelse(path[i].p_bh); |
2345 | path[i].p_bh = NULL; | |
2346 | i--; | |
2347 | ext_debug("return to level %d\n", i); | |
2348 | } | |
2349 | } | |
2350 | ||
2351 | /* TODO: flexible tree reduction should be here */ | |
2352 | if (path->p_hdr->eh_entries == 0) { | |
2353 | /* | |
d0d856e8 RD |
2354 | * truncating to zero freed the whole tree, |
2355 | * so we need to correct eh_depth | |
a86c6181 AT |
2356 | */ |
2357 | err = ext4_ext_get_access(handle, inode, path); | |
2358 | if (err == 0) { | |
2359 | ext_inode_hdr(inode)->eh_depth = 0; | |
2360 | ext_inode_hdr(inode)->eh_max = | |
55ad63bf | 2361 | cpu_to_le16(ext4_ext_space_root(inode, 0)); |
a86c6181 AT |
2362 | err = ext4_ext_dirty(handle, inode, path); |
2363 | } | |
2364 | } | |
2365 | out: | |
a86c6181 AT |
2366 | ext4_ext_drop_refs(path); |
2367 | kfree(path); | |
2368 | ext4_journal_stop(handle); | |
2369 | ||
2370 | return err; | |
2371 | } | |
2372 | ||
2373 | /* | |
2374 | * called at mount time | |
2375 | */ | |
2376 | void ext4_ext_init(struct super_block *sb) | |
2377 | { | |
2378 | /* | |
2379 | * possible initialization would be here | |
2380 | */ | |
2381 | ||
83982b6f | 2382 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { |
90576c0b | 2383 | #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) |
4776004f | 2384 | printk(KERN_INFO "EXT4-fs: file extents enabled"); |
bbf2f9fb RD |
2385 | #ifdef AGGRESSIVE_TEST |
2386 | printk(", aggressive tests"); | |
a86c6181 AT |
2387 | #endif |
2388 | #ifdef CHECK_BINSEARCH | |
2389 | printk(", check binsearch"); | |
2390 | #endif | |
2391 | #ifdef EXTENTS_STATS | |
2392 | printk(", stats"); | |
2393 | #endif | |
2394 | printk("\n"); | |
90576c0b | 2395 | #endif |
a86c6181 AT |
2396 | #ifdef EXTENTS_STATS |
2397 | spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); | |
2398 | EXT4_SB(sb)->s_ext_min = 1 << 30; | |
2399 | EXT4_SB(sb)->s_ext_max = 0; | |
2400 | #endif | |
2401 | } | |
2402 | } | |
2403 | ||
2404 | /* | |
2405 | * called at umount time | |
2406 | */ | |
2407 | void ext4_ext_release(struct super_block *sb) | |
2408 | { | |
83982b6f | 2409 | if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) |
a86c6181 AT |
2410 | return; |
2411 | ||
2412 | #ifdef EXTENTS_STATS | |
2413 | if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { | |
2414 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2415 | printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", | |
2416 | sbi->s_ext_blocks, sbi->s_ext_extents, | |
2417 | sbi->s_ext_blocks / sbi->s_ext_extents); | |
2418 | printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", | |
2419 | sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); | |
2420 | } | |
2421 | #endif | |
2422 | } | |
2423 | ||
093a088b AK |
2424 | static void bi_complete(struct bio *bio, int error) |
2425 | { | |
2426 | complete((struct completion *)bio->bi_private); | |
2427 | } | |
2428 | ||
2429 | /* FIXME!! we need to try to merge to left or right after zero-out */ | |
2430 | static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |
2431 | { | |
2432 | int ret = -EIO; | |
2433 | struct bio *bio; | |
2434 | int blkbits, blocksize; | |
2435 | sector_t ee_pblock; | |
2436 | struct completion event; | |
2437 | unsigned int ee_len, len, done, offset; | |
2438 | ||
2439 | ||
2440 | blkbits = inode->i_blkbits; | |
2441 | blocksize = inode->i_sb->s_blocksize; | |
2442 | ee_len = ext4_ext_get_actual_len(ex); | |
2443 | ee_pblock = ext_pblock(ex); | |
2444 | ||
2445 | /* convert ee_pblock to 512 byte sectors */ | |
2446 | ee_pblock = ee_pblock << (blkbits - 9); | |
2447 | ||
2448 | while (ee_len > 0) { | |
2449 | ||
2450 | if (ee_len > BIO_MAX_PAGES) | |
2451 | len = BIO_MAX_PAGES; | |
2452 | else | |
2453 | len = ee_len; | |
2454 | ||
2455 | bio = bio_alloc(GFP_NOIO, len); | |
093a088b AK |
2456 | bio->bi_sector = ee_pblock; |
2457 | bio->bi_bdev = inode->i_sb->s_bdev; | |
2458 | ||
2459 | done = 0; | |
2460 | offset = 0; | |
2461 | while (done < len) { | |
2462 | ret = bio_add_page(bio, ZERO_PAGE(0), | |
2463 | blocksize, offset); | |
2464 | if (ret != blocksize) { | |
2465 | /* | |
2466 | * We can't add any more pages because of | |
2467 | * hardware limitations. Start a new bio. | |
2468 | */ | |
2469 | break; | |
2470 | } | |
2471 | done++; | |
2472 | offset += blocksize; | |
2473 | if (offset >= PAGE_CACHE_SIZE) | |
2474 | offset = 0; | |
2475 | } | |
2476 | ||
2477 | init_completion(&event); | |
2478 | bio->bi_private = &event; | |
2479 | bio->bi_end_io = bi_complete; | |
2480 | submit_bio(WRITE, bio); | |
2481 | wait_for_completion(&event); | |
2482 | ||
2483 | if (test_bit(BIO_UPTODATE, &bio->bi_flags)) | |
2484 | ret = 0; | |
2485 | else { | |
2486 | ret = -EIO; | |
2487 | break; | |
2488 | } | |
2489 | bio_put(bio); | |
2490 | ee_len -= done; | |
2491 | ee_pblock += done << (blkbits - 9); | |
2492 | } | |
2493 | return ret; | |
2494 | } | |
2495 | ||
3977c965 | 2496 | #define EXT4_EXT_ZERO_LEN 7 |
56055d3a AA |
2497 | /* |
2498 | * This function is called by ext4_ext_get_blocks() if someone tries to write | |
2499 | * to an uninitialized extent. It may result in splitting the uninitialized | |
2500 | * extent into multiple extents (up to three - one initialized and two |
2501 | * uninitialized). | |
2502 | * There are three possibilities: | |
2503 | * a> There is no split required: Entire extent should be initialized | |
2504 | * b> Splits in two extents: Write is happening at either end of the extent | |
2505 | * c> Splits in three extents: Someone is writing in the middle of the extent |
2506 | */ | |
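
/*
 * Worked example (hypothetical numbers) for case c>: an uninitialized
 * extent covers logical blocks 100..199 and a write comes in for blocks
 * 140..149.  The extent is split into ex1 = 100..139 (uninitialized),
 * ex2 = 140..149 (initialized) and ex3 = 150..199 (uninitialized).
 */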
725d26d3 AK |
2507 | static int ext4_ext_convert_to_initialized(handle_t *handle, |
2508 | struct inode *inode, | |
2509 | struct ext4_ext_path *path, | |
2510 | ext4_lblk_t iblock, | |
498e5f24 | 2511 | unsigned int max_blocks) |
56055d3a | 2512 | { |
95c3889c | 2513 | struct ext4_extent *ex, newex, orig_ex; |
56055d3a AA |
2514 | struct ext4_extent *ex1 = NULL; |
2515 | struct ext4_extent *ex2 = NULL; | |
2516 | struct ext4_extent *ex3 = NULL; | |
2517 | struct ext4_extent_header *eh; | |
725d26d3 AK |
2518 | ext4_lblk_t ee_block; |
2519 | unsigned int allocated, ee_len, depth; | |
56055d3a AA |
2520 | ext4_fsblk_t newblock; |
2521 | int err = 0; | |
2522 | int ret = 0; | |
2523 | ||
2524 | depth = ext_depth(inode); | |
2525 | eh = path[depth].p_hdr; | |
2526 | ex = path[depth].p_ext; | |
2527 | ee_block = le32_to_cpu(ex->ee_block); | |
2528 | ee_len = ext4_ext_get_actual_len(ex); | |
2529 | allocated = ee_len - (iblock - ee_block); | |
2530 | newblock = iblock - ee_block + ext_pblock(ex); | |
2531 | ex2 = ex; | |
95c3889c AK |
2532 | orig_ex.ee_block = ex->ee_block; |
2533 | orig_ex.ee_len = cpu_to_le16(ee_len); | |
2534 | ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); | |
56055d3a | 2535 | |
9df5643a AK |
2536 | err = ext4_ext_get_access(handle, inode, path + depth); |
2537 | if (err) | |
2538 | goto out; | |
3977c965 AK |
2539 | /* If the extent is no longer than 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */ |
2540 | if (ee_len <= 2*EXT4_EXT_ZERO_LEN) { | |
2541 | err = ext4_ext_zeroout(inode, &orig_ex); | |
2542 | if (err) | |
2543 | goto fix_extent_len; | |
2544 | /* update the extent length and mark as initialized */ | |
2545 | ex->ee_block = orig_ex.ee_block; | |
2546 | ex->ee_len = orig_ex.ee_len; | |
2547 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2548 | ext4_ext_dirty(handle, inode, path + depth); | |
161e7b7c AK |
2549 | /* zeroed the full extent */ |
2550 | return allocated; | |
3977c965 | 2551 | } |
9df5643a | 2552 | |
56055d3a AA |
2553 | /* ex1: ee_block to iblock - 1 : uninitialized */ |
2554 | if (iblock > ee_block) { | |
2555 | ex1 = ex; | |
2556 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | |
2557 | ext4_ext_mark_uninitialized(ex1); | |
2558 | ex2 = &newex; | |
2559 | } | |
2560 | /* | |
2561 | * for sanity, update the length of the ex2 extent before | |
2562 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | |
2563 | * overlap of blocks. | |
2564 | */ | |
2565 | if (!ex1 && allocated > max_blocks) | |
2566 | ex2->ee_len = cpu_to_le16(max_blocks); | |
2567 | /* ex3: to ee_block + ee_len : uninitialised */ | |
2568 | if (allocated > max_blocks) { | |
2569 | unsigned int newdepth; | |
3977c965 AK |
2570 | /* If the remaining part is no longer than EXT4_EXT_ZERO_LEN blocks, zero it out directly */ |
2571 | if (allocated <= EXT4_EXT_ZERO_LEN) { | |
d03856bd AK |
2572 | /* |
2573 | * iblock == ee_block is handled by the zeroout |
2574 | * at the beginning. | |
2575 | * Mark first half uninitialized. | |
3977c965 AK |
2576 | * Mark second half initialized and zero out the |
2577 | * initialized extent | |
2578 | */ | |
2579 | ex->ee_block = orig_ex.ee_block; | |
2580 | ex->ee_len = cpu_to_le16(ee_len - allocated); | |
2581 | ext4_ext_mark_uninitialized(ex); | |
2582 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2583 | ext4_ext_dirty(handle, inode, path + depth); | |
2584 | ||
2585 | ex3 = &newex; | |
2586 | ex3->ee_block = cpu_to_le32(iblock); | |
2587 | ext4_ext_store_pblock(ex3, newblock); | |
2588 | ex3->ee_len = cpu_to_le16(allocated); | |
0031462b MC |
2589 | err = ext4_ext_insert_extent(handle, inode, path, |
2590 | ex3, 0); | |
3977c965 AK |
2591 | if (err == -ENOSPC) { |
2592 | err = ext4_ext_zeroout(inode, &orig_ex); | |
2593 | if (err) | |
2594 | goto fix_extent_len; | |
2595 | ex->ee_block = orig_ex.ee_block; | |
2596 | ex->ee_len = orig_ex.ee_len; | |
2597 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2598 | ext4_ext_dirty(handle, inode, path + depth); | |
d03856bd | 2599 | /* blocks available from iblock */ |
161e7b7c | 2600 | return allocated; |
3977c965 AK |
2601 | |
2602 | } else if (err) | |
2603 | goto fix_extent_len; | |
2604 | ||
161e7b7c AK |
2605 | /* |
2606 | * We need to zero out the second half because | |
2607 | * a fallocate request can update the file size and |
2608 | * converting the second half to an initialized extent |
2609 | * implies that we could leak some junk data to user |
2610 | * space. | |
2611 | */ | |
2612 | err = ext4_ext_zeroout(inode, ex3); | |
2613 | if (err) { | |
2614 | /* | |
2615 | * We should actually mark the | |
2616 | * second half as uninit and return error | |
2617 | * Insert would have changed the extent | |
2618 | */ | |
2619 | depth = ext_depth(inode); | |
2620 | ext4_ext_drop_refs(path); | |
2621 | path = ext4_ext_find_extent(inode, | |
2622 | iblock, path); | |
2623 | if (IS_ERR(path)) { | |
2624 | err = PTR_ERR(path); | |
2625 | return err; | |
2626 | } | |
d03856bd | 2627 | /* get the second half extent details */ |
161e7b7c AK |
2628 | ex = path[depth].p_ext; |
2629 | err = ext4_ext_get_access(handle, inode, | |
2630 | path + depth); | |
2631 | if (err) | |
2632 | return err; | |
2633 | ext4_ext_mark_uninitialized(ex); | |
2634 | ext4_ext_dirty(handle, inode, path + depth); | |
2635 | return err; | |
2636 | } | |
2637 | ||
2638 | /* zeroed the second half */ | |
3977c965 AK |
2639 | return allocated; |
2640 | } | |
56055d3a AA |
2641 | ex3 = &newex; |
2642 | ex3->ee_block = cpu_to_le32(iblock + max_blocks); | |
2643 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | |
2644 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | |
2645 | ext4_ext_mark_uninitialized(ex3); | |
0031462b | 2646 | err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); |
093a088b AK |
2647 | if (err == -ENOSPC) { |
2648 | err = ext4_ext_zeroout(inode, &orig_ex); | |
2649 | if (err) | |
2650 | goto fix_extent_len; | |
2651 | /* update the extent length and mark as initialized */ | |
95c3889c AK |
2652 | ex->ee_block = orig_ex.ee_block; |
2653 | ex->ee_len = orig_ex.ee_len; | |
2654 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
95c3889c | 2655 | ext4_ext_dirty(handle, inode, path + depth); |
161e7b7c | 2656 | /* zeroed the full extent */ |
d03856bd | 2657 | /* blocks available from iblock */ |
161e7b7c | 2658 | return allocated; |
093a088b AK |
2659 | |
2660 | } else if (err) | |
2661 | goto fix_extent_len; | |
56055d3a AA |
2662 | /* |
2663 | * The depth, and hence eh & ex might change | |
2664 | * as part of the insert above. | |
2665 | */ | |
2666 | newdepth = ext_depth(inode); | |
95c3889c | 2667 | /* |
73ac36ea | 2668 | * update the extent length after successful insert of the |
95c3889c AK |
2669 | * split extent |
2670 | */ | |
2671 | orig_ex.ee_len = cpu_to_le16(ee_len - | |
2672 | ext4_ext_get_actual_len(ex3)); | |
d03856bd AK |
2673 | depth = newdepth; |
2674 | ext4_ext_drop_refs(path); | |
2675 | path = ext4_ext_find_extent(inode, iblock, path); | |
2676 | if (IS_ERR(path)) { | |
2677 | err = PTR_ERR(path); | |
2678 | goto out; | |
56055d3a | 2679 | } |
d03856bd AK |
2680 | eh = path[depth].p_hdr; |
2681 | ex = path[depth].p_ext; | |
2682 | if (ex2 != &newex) | |
2683 | ex2 = ex; | |
2684 | ||
2685 | err = ext4_ext_get_access(handle, inode, path + depth); | |
2686 | if (err) | |
2687 | goto out; | |
2688 | ||
56055d3a | 2689 | allocated = max_blocks; |
3977c965 AK |
2690 | |
2691 | /* If the extent is no longer than EXT4_EXT_ZERO_LEN blocks and we are |
2692 | * trying to insert an extent in the middle, zero it out directly; |
2693 | * otherwise give the extent a chance to merge to the left |
2694 | */ | |
2695 | if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && | |
2696 | iblock != ee_block) { | |
2697 | err = ext4_ext_zeroout(inode, &orig_ex); | |
2698 | if (err) | |
2699 | goto fix_extent_len; | |
2700 | /* update the extent length and mark as initialized */ | |
2701 | ex->ee_block = orig_ex.ee_block; | |
2702 | ex->ee_len = orig_ex.ee_len; | |
2703 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2704 | ext4_ext_dirty(handle, inode, path + depth); | |
161e7b7c | 2705 | /* zero out the first half */ |
d03856bd | 2706 | /* blocks available from iblock */ |
161e7b7c | 2707 | return allocated; |
3977c965 | 2708 | } |
56055d3a AA |
2709 | } |
2710 | /* | |
2711 | * If there was a change of depth as part of the | |
2712 | * insertion of ex3 above, we need to update the length | |
2713 | * of the ex1 extent again here | |
2714 | */ | |
2715 | if (ex1 && ex1 != ex) { | |
2716 | ex1 = ex; | |
2717 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | |
2718 | ext4_ext_mark_uninitialized(ex1); | |
2719 | ex2 = &newex; | |
2720 | } | |
2721 | /* ex2: iblock to iblock + maxblocks-1 : initialised */ | |
2722 | ex2->ee_block = cpu_to_le32(iblock); | |
56055d3a AA |
2723 | ext4_ext_store_pblock(ex2, newblock); |
2724 | ex2->ee_len = cpu_to_le16(allocated); | |
2725 | if (ex2 != ex) | |
2726 | goto insert; | |
56055d3a AA |
2727 | /* |
2728 | * New (initialized) extent starts from the first block | |
2729 | * in the current extent. i.e., ex2 == ex | |
2730 | * We have to see if it can be merged with the extent | |
2731 | * on the left. | |
2732 | */ | |
2733 | if (ex2 > EXT_FIRST_EXTENT(eh)) { | |
2734 | /* | |
2735 | * To merge left, pass "ex2 - 1" to try_to_merge(), | |
2736 | * since it merges towards right _only_. | |
2737 | */ | |
2738 | ret = ext4_ext_try_to_merge(inode, path, ex2 - 1); | |
2739 | if (ret) { | |
2740 | err = ext4_ext_correct_indexes(handle, inode, path); | |
2741 | if (err) | |
2742 | goto out; | |
2743 | depth = ext_depth(inode); | |
2744 | ex2--; | |
2745 | } | |
2746 | } | |
2747 | /* | |
2748 | * Try to merge towards the right. This might be required |
2749 | * only when the whole extent is being written to. | |
2750 | * i.e. ex2 == ex and ex3 == NULL. | |
2751 | */ | |
2752 | if (!ex3) { | |
2753 | ret = ext4_ext_try_to_merge(inode, path, ex2); | |
2754 | if (ret) { | |
2755 | err = ext4_ext_correct_indexes(handle, inode, path); | |
2756 | if (err) | |
2757 | goto out; | |
2758 | } | |
2759 | } | |
2760 | /* Mark modified extent as dirty */ | |
2761 | err = ext4_ext_dirty(handle, inode, path + depth); | |
2762 | goto out; | |
2763 | insert: | |
0031462b | 2764 | err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); |
093a088b AK |
2765 | if (err == -ENOSPC) { |
2766 | err = ext4_ext_zeroout(inode, &orig_ex); | |
2767 | if (err) | |
2768 | goto fix_extent_len; | |
2769 | /* update the extent length and mark as initialized */ | |
95c3889c AK |
2770 | ex->ee_block = orig_ex.ee_block; |
2771 | ex->ee_len = orig_ex.ee_len; | |
2772 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
95c3889c | 2773 | ext4_ext_dirty(handle, inode, path + depth); |
161e7b7c AK |
2774 | /* zero out the first half */ |
2775 | return allocated; | |
093a088b AK |
2776 | } else if (err) |
2777 | goto fix_extent_len; | |
56055d3a | 2778 | out: |
553f9008 | 2779 | ext4_ext_show_leaf(inode, path); |
56055d3a | 2780 | return err ? err : allocated; |
093a088b AK |
2781 | |
2782 | fix_extent_len: | |
2783 | ex->ee_block = orig_ex.ee_block; | |
2784 | ex->ee_len = orig_ex.ee_len; | |
2785 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2786 | ext4_ext_mark_uninitialized(ex); | |
2787 | ext4_ext_dirty(handle, inode, path + depth); | |
2788 | return err; | |
56055d3a AA |
2789 | } |
2790 | ||
0031462b MC |
2791 | /* |
2792 | * This function is called by ext4_ext_get_blocks() from | |
2793 | * ext4_get_blocks_dio_write() when DIO is used to write |
2794 | * to an uninitialized extent. |
2795 | * |
2796 | * Writing to an uninitialized extent may result in splitting the |
2797 | * uninitialized extent into multiple initialized/uninitialized extents (up to three). |
2798 | * There are three possibilities: |
2799 | * a> There is no split required: Entire extent should be uninitialized |
2800 | * b> Splits in two extents: Write is happening at either end of the extent |
2801 | * c> Splits in three extents: Someone is writing in the middle of the extent |
2802 | * |
2803 | * One or more index blocks may be needed if the extent tree grows after |
2804 | * the uninitialized extent is split. To prevent ENOSPC from occurring when |
2805 | * the IO completes, we need to split the uninitialized extent before the |
2806 | * DIO is submitted. The uninitialized extent passed in will be split |
2807 | * into (at most) three uninitialized extents. After the IO completes, the |
2808 | * part being filled will be converted to initialized by the end_io callback |
2809 | * via ext4_convert_unwritten_extents(). |
2810 | */ | |
2811 | static int ext4_split_unwritten_extents(handle_t *handle, | |
2812 | struct inode *inode, | |
2813 | struct ext4_ext_path *path, | |
2814 | ext4_lblk_t iblock, | |
2815 | unsigned int max_blocks, | |
2816 | int flags) | |
2817 | { | |
2818 | struct ext4_extent *ex, newex, orig_ex; | |
2819 | struct ext4_extent *ex1 = NULL; | |
2820 | struct ext4_extent *ex2 = NULL; | |
2821 | struct ext4_extent *ex3 = NULL; | |
2822 | struct ext4_extent_header *eh; | |
2823 | ext4_lblk_t ee_block; | |
2824 | unsigned int allocated, ee_len, depth; | |
2825 | ext4_fsblk_t newblock; | |
2826 | int err = 0; | |
2827 | int ret = 0; | |
2828 | ||
2829 | ext_debug("ext4_split_unwritten_extents: inode %lu," | |
2830 | "iblock %llu, max_blocks %u\n", inode->i_ino, | |
2831 | (unsigned long long)iblock, max_blocks); | |
2832 | depth = ext_depth(inode); | |
2833 | eh = path[depth].p_hdr; | |
2834 | ex = path[depth].p_ext; | |
2835 | ee_block = le32_to_cpu(ex->ee_block); | |
2836 | ee_len = ext4_ext_get_actual_len(ex); | |
2837 | allocated = ee_len - (iblock - ee_block); | |
2838 | newblock = iblock - ee_block + ext_pblock(ex); | |
2839 | ex2 = ex; | |
2840 | orig_ex.ee_block = ex->ee_block; | |
2841 | orig_ex.ee_len = cpu_to_le16(ee_len); | |
2842 | ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); | |
2843 | ||
2844 | /* | |
2845 | * if the entire uninitialized extent's length is no more than |
2846 | * the size of the extent to write, there is no need to split the |
2847 | * uninitialized extent |
2848 | */ | |
2849 | if (allocated <= max_blocks) | |
2850 | return ret; | |
2851 | ||
2852 | err = ext4_ext_get_access(handle, inode, path + depth); | |
2853 | if (err) | |
2854 | goto out; | |
2855 | /* ex1: ee_block to iblock - 1 : uninitialized */ | |
2856 | if (iblock > ee_block) { | |
2857 | ex1 = ex; | |
2858 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | |
2859 | ext4_ext_mark_uninitialized(ex1); | |
2860 | ex2 = &newex; | |
2861 | } | |
2862 | /* | |
2863 | * for sanity, update the length of the ex2 extent before | |
2864 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | |
2865 | * overlap of blocks. | |
2866 | */ | |
2867 | if (!ex1 && allocated > max_blocks) | |
2868 | ex2->ee_len = cpu_to_le16(max_blocks); | |
2869 | /* ex3: to ee_block + ee_len : uninitialised */ | |
2870 | if (allocated > max_blocks) { | |
2871 | unsigned int newdepth; | |
2872 | ex3 = &newex; | |
2873 | ex3->ee_block = cpu_to_le32(iblock + max_blocks); | |
2874 | ext4_ext_store_pblock(ex3, newblock + max_blocks); | |
2875 | ex3->ee_len = cpu_to_le16(allocated - max_blocks); | |
2876 | ext4_ext_mark_uninitialized(ex3); | |
2877 | err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); | |
2878 | if (err == -ENOSPC) { | |
2879 | err = ext4_ext_zeroout(inode, &orig_ex); | |
2880 | if (err) | |
2881 | goto fix_extent_len; | |
2882 | /* update the extent length and mark as initialized */ | |
2883 | ex->ee_block = orig_ex.ee_block; | |
2884 | ex->ee_len = orig_ex.ee_len; | |
2885 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2886 | ext4_ext_dirty(handle, inode, path + depth); | |
2887 | /* zeroed the full extent */ | |
2888 | /* blocks available from iblock */ | |
2889 | return allocated; | |
2890 | ||
2891 | } else if (err) | |
2892 | goto fix_extent_len; | |
2893 | /* | |
2894 | * The depth, and hence eh & ex might change | |
2895 | * as part of the insert above. | |
2896 | */ | |
2897 | newdepth = ext_depth(inode); | |
2898 | /* | |
2899 | * update the extent length after successful insert of the | |
2900 | * split extent | |
2901 | */ | |
2902 | orig_ex.ee_len = cpu_to_le16(ee_len - | |
2903 | ext4_ext_get_actual_len(ex3)); | |
2904 | depth = newdepth; | |
2905 | ext4_ext_drop_refs(path); | |
2906 | path = ext4_ext_find_extent(inode, iblock, path); | |
2907 | if (IS_ERR(path)) { | |
2908 | err = PTR_ERR(path); | |
2909 | goto out; | |
2910 | } | |
2911 | eh = path[depth].p_hdr; | |
2912 | ex = path[depth].p_ext; | |
2913 | if (ex2 != &newex) | |
2914 | ex2 = ex; | |
2915 | ||
2916 | err = ext4_ext_get_access(handle, inode, path + depth); | |
2917 | if (err) | |
2918 | goto out; | |
2919 | ||
2920 | allocated = max_blocks; | |
2921 | } | |
2922 | /* | |
2923 | * If there was a change of depth as part of the | |
2924 | * insertion of ex3 above, we need to update the length | |
2925 | * of the ex1 extent again here | |
2926 | */ | |
2927 | if (ex1 && ex1 != ex) { | |
2928 | ex1 = ex; | |
2929 | ex1->ee_len = cpu_to_le16(iblock - ee_block); | |
2930 | ext4_ext_mark_uninitialized(ex1); | |
2931 | ex2 = &newex; | |
2932 | } | |
2933 | /* | |
2934 | * ex2: iblock to iblock + maxblocks-1 : to be direct IO written, | |
2935 | * uninitialised still. | |
2936 | */ | |
2937 | ex2->ee_block = cpu_to_le32(iblock); | |
2938 | ext4_ext_store_pblock(ex2, newblock); | |
2939 | ex2->ee_len = cpu_to_le16(allocated); | |
2940 | ext4_ext_mark_uninitialized(ex2); | |
2941 | if (ex2 != ex) | |
2942 | goto insert; | |
2943 | /* Mark modified extent as dirty */ | |
2944 | err = ext4_ext_dirty(handle, inode, path + depth); | |
2945 | ext_debug("out here\n"); | |
2946 | goto out; | |
2947 | insert: | |
2948 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | |
2949 | if (err == -ENOSPC) { | |
2950 | err = ext4_ext_zeroout(inode, &orig_ex); | |
2951 | if (err) | |
2952 | goto fix_extent_len; | |
2953 | /* update the extent length and mark as initialized */ | |
2954 | ex->ee_block = orig_ex.ee_block; | |
2955 | ex->ee_len = orig_ex.ee_len; | |
2956 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2957 | ext4_ext_dirty(handle, inode, path + depth); | |
2958 | /* zero out the first half */ | |
2959 | return allocated; | |
2960 | } else if (err) | |
2961 | goto fix_extent_len; | |
2962 | out: | |
2963 | ext4_ext_show_leaf(inode, path); | |
2964 | return err ? err : allocated; | |
2965 | ||
2966 | fix_extent_len: | |
2967 | ex->ee_block = orig_ex.ee_block; | |
2968 | ex->ee_len = orig_ex.ee_len; | |
2969 | ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); | |
2970 | ext4_ext_mark_uninitialized(ex); | |
2971 | ext4_ext_dirty(handle, inode, path + depth); | |
2972 | return err; | |
2973 | } | |
2974 | static int ext4_convert_unwritten_extents_dio(handle_t *handle, | |
2975 | struct inode *inode, | |
2976 | struct ext4_ext_path *path) | |
2977 | { | |
2978 | struct ext4_extent *ex; | |
2979 | struct ext4_extent_header *eh; | |
2980 | int depth; | |
2981 | int err = 0; | |
2982 | int ret = 0; | |
2983 | ||
2984 | depth = ext_depth(inode); | |
2985 | eh = path[depth].p_hdr; | |
2986 | ex = path[depth].p_ext; | |
2987 | ||
2988 | err = ext4_ext_get_access(handle, inode, path + depth); | |
2989 | if (err) | |
2990 | goto out; | |
2991 | /* first mark the extent as initialized */ | |
2992 | ext4_ext_mark_initialized(ex); | |
2993 | ||
2994 | /* | |
2995 | * We have to see if it can be merged with the extent | |
2996 | * on the left. | |
2997 | */ | |
2998 | if (ex > EXT_FIRST_EXTENT(eh)) { | |
2999 | /* | |
3000 | * To merge left, pass "ex - 1" to try_to_merge(), | |
3001 | * since it merges towards right _only_. | |
3002 | */ | |
3003 | ret = ext4_ext_try_to_merge(inode, path, ex - 1); | |
3004 | if (ret) { | |
3005 | err = ext4_ext_correct_indexes(handle, inode, path); | |
3006 | if (err) | |
3007 | goto out; | |
3008 | depth = ext_depth(inode); | |
3009 | ex--; | |
3010 | } | |
3011 | } | |
3012 | /* | |
3013 | * Try to merge towards the right. |
3014 | */ | |
3015 | ret = ext4_ext_try_to_merge(inode, path, ex); | |
3016 | if (ret) { | |
3017 | err = ext4_ext_correct_indexes(handle, inode, path); | |
3018 | if (err) | |
3019 | goto out; | |
3020 | depth = ext_depth(inode); | |
3021 | } | |
3022 | /* Mark modified extent as dirty */ | |
3023 | err = ext4_ext_dirty(handle, inode, path + depth); | |
3024 | out: | |
3025 | ext4_ext_show_leaf(inode, path); | |
3026 | return err; | |
3027 | } | |
3028 | ||
3029 | static int | |
3030 | ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |
3031 | ext4_lblk_t iblock, unsigned int max_blocks, | |
3032 | struct ext4_ext_path *path, int flags, | |
3033 | unsigned int allocated, struct buffer_head *bh_result, | |
3034 | ext4_fsblk_t newblock) | |
3035 | { | |
3036 | int ret = 0; | |
3037 | int err = 0; | |
8d5d02e6 | 3038 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
0031462b MC |
3039 | |
3040 | ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " | 
3041 | "block %llu, max_blocks %u, flags %d, allocated %u", | |
3042 | inode->i_ino, (unsigned long long)iblock, max_blocks, | |
3043 | flags, allocated); | |
3044 | ext4_ext_show_leaf(inode, path); | |
3045 | ||
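/*
 * Overview of the cases handled below, keyed on flags:
 *  EXT4_GET_BLOCKS_DIO_CREATE_EXT:  DIO get_block() before the IO is
 *                                   submitted; split the unwritten extent
 *  EXT4_GET_BLOCKS_DIO_CONVERT_EXT: DIO end_io has completed; convert the
 *                                   extent to written
 *  EXT4_GET_BLOCKS_UNINIT_EXT:      repeated fallocate request; map the
 *                                   existing unwritten extent as-is
 *  EXT4_GET_BLOCKS_CREATE not set:  lookup only; leave the buffer unwritten
 *  otherwise:                       buffered writeout; convert the extent
 *                                   to initialized
 */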
3046 | /* DIO get_block() called before the IO is submitted: split the extent */ | 
3047 | if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { | |
3048 | ret = ext4_split_unwritten_extents(handle, | |
3049 | inode, path, iblock, | |
3050 | max_blocks, flags); | |
8d5d02e6 MC |
3051 | /* flag in the io_end struct that we need to convert when the IO is done */ | 
3052 | if (io) | |
3053 | io->flag = DIO_AIO_UNWRITTEN; | |
0031462b MC |
3054 | goto out; |
3055 | } | |
3056 | /* DIO end_io completed: convert the filled extent to written */ | 
3057 | if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { | |
3058 | ret = ext4_convert_unwritten_extents_dio(handle, inode, | |
3059 | path); | |
3060 | goto out2; | |
3061 | } | |
3062 | /* buffered IO case */ | |
3063 | /* | |
3064 | * repeated fallocate creation request: | 
3065 | * we already have an unwritten extent | |
3066 | */ | |
3067 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) | |
3068 | goto map_out; | |
3069 | ||
3070 | /* buffered READ or buffered write_begin() lookup */ | |
3071 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | |
3072 | /* | |
3073 | * We have blocks reserved already. We | |
3074 | * return allocated blocks so that delalloc | |
3075 | * won't do block reservation for us. But | |
3076 | * the buffer head will be unmapped so that | |
3077 | * a read from the block returns 0s. | |
3078 | */ | |
3079 | set_buffer_unwritten(bh_result); | |
3080 | goto out1; | |
3081 | } | |
3082 | ||
3083 | /* buffered write, writepage time, convert */ | 
3084 | ret = ext4_ext_convert_to_initialized(handle, inode, | |
3085 | path, iblock, | |
3086 | max_blocks); | |
3087 | out: | |
3088 | if (ret <= 0) { | |
3089 | err = ret; | |
3090 | goto out2; | |
3091 | } else | |
3092 | allocated = ret; | |
3093 | set_buffer_new(bh_result); | |
3094 | map_out: | |
3095 | set_buffer_mapped(bh_result); | |
3096 | out1: | |
3097 | if (allocated > max_blocks) | |
3098 | allocated = max_blocks; | |
3099 | ext4_ext_show_leaf(inode, path); | |
3100 | bh_result->b_bdev = inode->i_sb->s_bdev; | |
3101 | bh_result->b_blocknr = newblock; | |
3102 | out2: | |
3103 | if (path) { | |
3104 | ext4_ext_drop_refs(path); | |
3105 | kfree(path); | |
3106 | } | |
3107 | return err ? err : allocated; | |
3108 | } | |
c278bfec | 3109 | /* |
f5ab0d1f MC |
3110 | * Block allocation/map/preallocation routine for extents based files |
3111 | * | |
3112 | * | |
c278bfec | 3113 | * Need to be called with |
0e855ac8 AK |
3114 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block |
3115 | * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) | 
f5ab0d1f MC |
3116 | * |
3117 | * return > 0, number of blocks already mapped/allocated | 
3118 | * if create == 0 and these are pre-allocated blocks | |
3119 | * buffer head is unmapped | |
3120 | * otherwise blocks are mapped | |
3121 | * | |
3122 | * return = 0, if plain look up failed (blocks have not been allocated) | |
3123 | * buffer head is unmapped | |
3124 | * | |
3125 | * return < 0, error case. | |
c278bfec | 3126 | */ |
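/*
 * A minimal caller sketch (illustrative only; handle_error(), map_hole()
 * and use_blocks() are hypothetical helpers, not fs/ext4 functions),
 * showing how the return value is interpreted:
 *
 *	ret = ext4_ext_get_blocks(handle, inode, iblock, max_blocks,
 *				  bh_result, flags);
 *	if (ret < 0)
 *		handle_error(ret);		// error case
 *	else if (ret == 0)
 *		map_hole(bh_result);		// hole, bh left unmapped
 *	else
 *		use_blocks(bh_result, ret);	// 'ret' blocks covered
 */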
f65e6fba | 3127 | int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, |
725d26d3 | 3128 | ext4_lblk_t iblock, |
498e5f24 | 3129 | unsigned int max_blocks, struct buffer_head *bh_result, |
c2177057 | 3130 | int flags) |
a86c6181 AT |
3131 | { |
3132 | struct ext4_ext_path *path = NULL; | |
56055d3a | 3133 | struct ext4_extent_header *eh; |
a86c6181 | 3134 | struct ext4_extent newex, *ex; |
498e5f24 TT |
3135 | ext4_fsblk_t newblock; |
3136 | int err = 0, depth, ret, cache_type; | |
3137 | unsigned int allocated = 0; | |
c9de560d | 3138 | struct ext4_allocation_request ar; |
8d5d02e6 | 3139 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
a86c6181 AT |
3140 | |
3141 | __clear_bit(BH_New, &bh_result->b_state); | |
84fe3bef | 3142 | ext_debug("blocks %u/%u requested for inode %lu\n", |
bba90743 | 3143 | iblock, max_blocks, inode->i_ino); |
a86c6181 AT |
3144 | |
3145 | /* check in cache */ | |
498e5f24 TT |
3146 | cache_type = ext4_ext_in_cache(inode, iblock, &newex); |
3147 | if (cache_type) { | |
3148 | if (cache_type == EXT4_EXT_CACHE_GAP) { | |
c2177057 | 3149 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
56055d3a AA |
3150 | /* |
3151 | * block isn't allocated yet and | |
3152 | * user doesn't want to allocate it | |
3153 | */ | |
a86c6181 AT |
3154 | goto out2; |
3155 | } | |
3156 | /* we should allocate requested block */ | |
498e5f24 | 3157 | } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { |
a86c6181 | 3158 | /* block is already allocated */ |
8c55e204 DK |
3159 | newblock = iblock |
3160 | - le32_to_cpu(newex.ee_block) | |
3161 | + ext_pblock(&newex); | |
d0d856e8 | 3162 | /* number of remaining blocks in the extent */ |
b939e376 | 3163 | allocated = ext4_ext_get_actual_len(&newex) - |
a86c6181 AT |
3164 | (iblock - le32_to_cpu(newex.ee_block)); |
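/*
 * e.g. if the cached extent starts at logical block 100, physical
 * block 5000, with length 8, and iblock is 103, then
 * newblock = 103 - 100 + 5000 = 5003 and allocated = 8 - 3 = 5.
 */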
3165 | goto out; | |
3166 | } else { | |
3167 | BUG(); | |
3168 | } | |
3169 | } | |
3170 | ||
3171 | /* find extent for this block */ | |
3172 | path = ext4_ext_find_extent(inode, iblock, NULL); | |
3173 | if (IS_ERR(path)) { | |
3174 | err = PTR_ERR(path); | |
3175 | path = NULL; | |
3176 | goto out2; | |
3177 | } | |
3178 | ||
3179 | depth = ext_depth(inode); | |
3180 | ||
3181 | /* | |
d0d856e8 RD |
3182 | * consistent leaf must not be empty; |
3183 | * this situation is possible, though, _during_ tree modification; | |
a86c6181 AT |
3184 | * this is why assert can't be put in ext4_ext_find_extent() |
3185 | */ | |
3186 | BUG_ON(path[depth].p_ext == NULL && depth != 0); | |
56055d3a | 3187 | eh = path[depth].p_hdr; |
a86c6181 | 3188 | |
7e028976 AM |
3189 | ex = path[depth].p_ext; |
3190 | if (ex) { | |
725d26d3 | 3191 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
f65e6fba | 3192 | ext4_fsblk_t ee_start = ext_pblock(ex); |
a2df2a63 | 3193 | unsigned short ee_len; |
471d4011 SB |
3194 | |
3195 | /* | |
471d4011 | 3196 | * Uninitialized extents are treated as holes, except that |
56055d3a | 3197 | * we split out initialized portions during a write. |
471d4011 | 3198 | */ |
a2df2a63 | 3199 | ee_len = ext4_ext_get_actual_len(ex); |
d0d856e8 | 3200 | /* if found extent covers block, simply return it */ |
8c55e204 | 3201 | if (iblock >= ee_block && iblock < ee_block + ee_len) { |
a86c6181 | 3202 | newblock = iblock - ee_block + ee_start; |
d0d856e8 | 3203 | /* number of remaining blocks in the extent */ |
a86c6181 | 3204 | allocated = ee_len - (iblock - ee_block); |
84fe3bef | 3205 | ext_debug("%u fit into %u:%d -> %llu\n", iblock, |
a86c6181 | 3206 | ee_block, ee_len, newblock); |
56055d3a | 3207 | |
a2df2a63 | 3208 | /* Do not put uninitialized extent in the cache */ |
56055d3a | 3209 | if (!ext4_ext_is_uninitialized(ex)) { |
a2df2a63 AA |
3210 | ext4_ext_put_in_cache(inode, ee_block, |
3211 | ee_len, ee_start, | |
3212 | EXT4_EXT_CACHE_EXTENT); | |
56055d3a AA |
3213 | goto out; |
3214 | } | |
0031462b MC |
3215 | ret = ext4_ext_handle_uninitialized_extents(handle, |
3216 | inode, iblock, max_blocks, path, | |
3217 | flags, allocated, bh_result, newblock); | |
3218 | return ret; | |
a86c6181 AT |
3219 | } |
3220 | } | |
3221 | ||
3222 | /* | |
d0d856e8 | 3223 | * requested block isn't allocated yet; |
a86c6181 AT |
3224 | * we must not create it if the create flag is zero | 
3225 | */ | |
c2177057 | 3226 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
56055d3a AA |
3227 | /* |
3228 | * put the gap we just found into the cache to speed up | 
3229 | * subsequent requests | |
3230 | */ | |
a86c6181 AT |
3231 | ext4_ext_put_gap_in_cache(inode, path, iblock); |
3232 | goto out2; | |
3233 | } | |
3234 | /* | |
c2ea3fde | 3235 | * Okay, we need to do block allocation. |
63f57933 | 3236 | */ |
a86c6181 | 3237 | |
c9de560d AT |
3238 | /* find neighbour allocated blocks */ |
3239 | ar.lleft = iblock; | |
3240 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); | |
3241 | if (err) | |
3242 | goto out2; | |
3243 | ar.lright = iblock; | |
3244 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); | |
3245 | if (err) | |
3246 | goto out2; | |
25d14f98 | 3247 | |
749269fa AA |
3248 | /* |
3249 | * See if request is beyond maximum number of blocks we can have in | |
3250 | * a single extent. For an initialized extent this limit is | |
3251 | * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is | |
3252 | * EXT_UNINIT_MAX_LEN. | |
3253 | */ | |
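/*
 * Note: the uninitialized limit is one block shorter because the top bit
 * of ee_len is used to flag an extent as uninitialized.
 */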
3254 | if (max_blocks > EXT_INIT_MAX_LEN && | |
c2177057 | 3255 | !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
749269fa AA |
3256 | max_blocks = EXT_INIT_MAX_LEN; |
3257 | else if (max_blocks > EXT_UNINIT_MAX_LEN && | |
c2177057 | 3258 | (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
749269fa AA |
3259 | max_blocks = EXT_UNINIT_MAX_LEN; |
3260 | ||
25d14f98 AA |
3261 | /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ |
3262 | newex.ee_block = cpu_to_le32(iblock); | |
3263 | newex.ee_len = cpu_to_le16(max_blocks); | |
3264 | err = ext4_ext_check_overlap(inode, &newex, path); | |
3265 | if (err) | |
b939e376 | 3266 | allocated = ext4_ext_get_actual_len(&newex); |
25d14f98 AA |
3267 | else |
3268 | allocated = max_blocks; | |
c9de560d AT |
3269 | |
3270 | /* allocate new block */ | |
3271 | ar.inode = inode; | |
3272 | ar.goal = ext4_ext_find_goal(inode, path, iblock); | |
3273 | ar.logical = iblock; | |
3274 | ar.len = allocated; | |
3275 | if (S_ISREG(inode->i_mode)) | |
3276 | ar.flags = EXT4_MB_HINT_DATA; | |
3277 | else | |
3278 | /* disable in-core preallocation for non-regular files */ | |
3279 | ar.flags = 0; | |
3280 | newblock = ext4_mb_new_blocks(handle, &ar, &err); | |
a86c6181 AT |
3281 | if (!newblock) |
3282 | goto out2; | |
84fe3bef | 3283 | ext_debug("allocate new block: goal %llu, found %llu/%u\n", |
498e5f24 | 3284 | ar.goal, newblock, allocated); |
a86c6181 AT |
3285 | |
3286 | /* try to insert new extent into found leaf and return */ | |
f65e6fba | 3287 | ext4_ext_store_pblock(&newex, newblock); |
c9de560d | 3288 | newex.ee_len = cpu_to_le16(ar.len); |
8d5d02e6 MC |
3289 | /* Mark uninitialized */ |
3290 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) { | 
a2df2a63 | 3291 | ext4_ext_mark_uninitialized(&newex); |
8d5d02e6 MC |
3292 | /* |
3293 | * An io_end structure is created for every async | 
3294 | * direct IO write to the middle of the file. | 
3295 | * To avoid an unnecessary conversion for every aio dio rewrite | 
3296 | * to the middle of the file, flag here only the IO that | 
3297 | * really needs the conversion. | 
3298 | * | |
3299 | */ | |
3300 | if (io && flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) | |
3301 | io->flag = DIO_AIO_UNWRITTEN; | |
3302 | } | |
0031462b | 3303 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
315054f0 AT |
3304 | if (err) { |
3305 | /* free data blocks we just allocated */ | |
c9de560d AT |
3306 | /* not a good idea to call discard here directly, |
3307 | * but otherwise we'd need to call it on every free() */ | 
c2ea3fde | 3308 | ext4_discard_preallocations(inode); |
315054f0 | 3309 | ext4_free_blocks(handle, inode, ext_pblock(&newex), |
b939e376 | 3310 | ext4_ext_get_actual_len(&newex), 0); |
a86c6181 | 3311 | goto out2; |
315054f0 | 3312 | } |
a86c6181 | 3313 | |
a86c6181 | 3314 | /* previous routine could use block we allocated */ |
f65e6fba | 3315 | newblock = ext_pblock(&newex); |
b939e376 | 3316 | allocated = ext4_ext_get_actual_len(&newex); |
953e622b | 3317 | set_buffer_new(bh_result); |
a86c6181 | 3318 | |
a2df2a63 | 3319 | /* Cache only when it is _not_ an uninitialized extent */ |
c2177057 | 3320 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) |
a2df2a63 AA |
3321 | ext4_ext_put_in_cache(inode, iblock, allocated, newblock, |
3322 | EXT4_EXT_CACHE_EXTENT); | |
a86c6181 AT |
3323 | out: |
3324 | if (allocated > max_blocks) | |
3325 | allocated = max_blocks; | |
3326 | ext4_ext_show_leaf(inode, path); | |
953e622b | 3327 | set_buffer_mapped(bh_result); |
a86c6181 AT |
3328 | bh_result->b_bdev = inode->i_sb->s_bdev; |
3329 | bh_result->b_blocknr = newblock; | |
3330 | out2: | |
3331 | if (path) { | |
3332 | ext4_ext_drop_refs(path); | |
3333 | kfree(path); | |
3334 | } | |
a86c6181 AT |
3335 | return err ? err : allocated; |
3336 | } | |
3337 | ||
cf108bca | 3338 | void ext4_ext_truncate(struct inode *inode) |
a86c6181 AT |
3339 | { |
3340 | struct address_space *mapping = inode->i_mapping; | |
3341 | struct super_block *sb = inode->i_sb; | |
725d26d3 | 3342 | ext4_lblk_t last_block; |
a86c6181 AT |
3343 | handle_t *handle; |
3344 | int err = 0; | |
3345 | ||
3346 | /* | |
3347 | * the first extent we free will probably be the last one in its block | 
3348 | */ | |
f3bd1f3f | 3349 | err = ext4_writepage_trans_blocks(inode); |
a86c6181 | 3350 | handle = ext4_journal_start(inode, err); |
cf108bca | 3351 | if (IS_ERR(handle)) |
a86c6181 | 3352 | return; |
a86c6181 | 3353 | |
cf108bca JK |
3354 | if (inode->i_size & (sb->s_blocksize - 1)) |
3355 | ext4_block_truncate_page(handle, mapping, inode->i_size); | |
a86c6181 | 3356 | |
9ddfc3dc JK |
3357 | if (ext4_orphan_add(handle, inode)) |
3358 | goto out_stop; | |
3359 | ||
0e855ac8 | 3360 | down_write(&EXT4_I(inode)->i_data_sem); |
a86c6181 AT |
3361 | ext4_ext_invalidate_cache(inode); |
3362 | ||
c2ea3fde | 3363 | ext4_discard_preallocations(inode); |
c9de560d | 3364 | |
a86c6181 | 3365 | /* |
d0d856e8 RD |
3366 | * TODO: optimization is possible here. |
3367 | * Probably we need not scan at all, | |
3368 | * because page truncation is enough. | |
a86c6181 | 3369 | */ |
a86c6181 AT |
3370 | |
3371 | /* we have to know where to truncate from in crash case */ | |
3372 | EXT4_I(inode)->i_disksize = inode->i_size; | |
3373 | ext4_mark_inode_dirty(handle, inode); | |
3374 | ||
3375 | last_block = (inode->i_size + sb->s_blocksize - 1) | |
3376 | >> EXT4_BLOCK_SIZE_BITS(sb); | |
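/*
 * e.g. with 4k blocks and i_size = 5000, last_block = 2, so blocks 0
 * and 1 are kept and everything from block 2 onwards is removed.
 */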
3377 | err = ext4_ext_remove_space(inode, last_block); | |
3378 | ||
3379 | /* In a multi-transaction truncate, we only make the final | |
56055d3a AA |
3380 | * transaction synchronous. |
3381 | */ | |
a86c6181 | 3382 | if (IS_SYNC(inode)) |
0390131b | 3383 | ext4_handle_sync(handle); |
a86c6181 AT |
3384 | |
3385 | out_stop: | |
9ddfc3dc | 3386 | up_write(&EXT4_I(inode)->i_data_sem); |
a86c6181 | 3387 | /* |
d0d856e8 | 3388 | * If this was a simple ftruncate() and the file will remain alive, |
a86c6181 AT |
3389 | * then we need to clear up the orphan record which we created above. |
3390 | * However, if this was a real unlink then we were called by | |
3391 | * ext4_delete_inode(), and we allow that function to clean up the | |
3392 | * orphan info for us. | |
3393 | */ | |
3394 | if (inode->i_nlink) | |
3395 | ext4_orphan_del(handle, inode); | |
3396 | ||
ef737728 SR |
3397 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); |
3398 | ext4_mark_inode_dirty(handle, inode); | |
a86c6181 AT |
3399 | ext4_journal_stop(handle); |
3400 | } | |
3401 | ||
fd28784a AK |
3402 | static void ext4_falloc_update_inode(struct inode *inode, |
3403 | int mode, loff_t new_size, int update_ctime) | |
3404 | { | |
3405 | struct timespec now; | |
3406 | ||
3407 | if (update_ctime) { | |
3408 | now = current_fs_time(inode->i_sb); | |
3409 | if (!timespec_equal(&inode->i_ctime, &now)) | |
3410 | inode->i_ctime = now; | |
3411 | } | |
3412 | /* | |
3413 | * Update only when preallocation was requested beyond | |
3414 | * the file size. | |
3415 | */ | |
cf17fea6 AK |
3416 | if (!(mode & FALLOC_FL_KEEP_SIZE)) { |
3417 | if (new_size > i_size_read(inode)) | |
3418 | i_size_write(inode, new_size); | |
3419 | if (new_size > EXT4_I(inode)->i_disksize) | |
3420 | ext4_update_i_disksize(inode, new_size); | |
fd28784a AK |
3421 | } |
3422 | ||
3423 | } | |
3424 | ||
a2df2a63 AA |
3425 | /* |
3426 | * preallocate space for a file. This implements ext4's fallocate inode | |
3427 | * operation, which gets called from the sys_fallocate() system call. | 
3428 | * For block-mapped files, posix_fallocate should fall back to the method | |
3429 | * of writing zeroes to the required new blocks (the same behavior which is | |
3430 | * expected for file systems which do not support fallocate() system call). | |
3431 | */ | |
3432 | long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) | |
3433 | { | |
3434 | handle_t *handle; | |
725d26d3 | 3435 | ext4_lblk_t block; |
fd28784a | 3436 | loff_t new_size; |
498e5f24 | 3437 | unsigned int max_blocks; |
a2df2a63 AA |
3438 | int ret = 0; |
3439 | int ret2 = 0; | |
3440 | int retries = 0; | |
3441 | struct buffer_head map_bh; | |
3442 | unsigned int credits, blkbits = inode->i_blkbits; | |
3443 | ||
3444 | /* | |
3445 | * currently supporting (pre)allocate mode for extent-based | |
3446 | * files _only_ | |
3447 | */ | |
3448 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | |
3449 | return -EOPNOTSUPP; | |
3450 | ||
3451 | /* preallocation to directories is currently not supported */ | |
3452 | if (S_ISDIR(inode->i_mode)) | |
3453 | return -ENODEV; | |
3454 | ||
3455 | block = offset >> blkbits; | |
fd28784a AK |
3456 | /* |
3457 | * We can't just convert len to max_blocks because the range | 
3458 | * may straddle block boundaries, e.g. blocksize = 4096, offset = 3072, len = 2048. | 
3459 | */ | |
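/*
 * Worked example: with blkbits = 12 (4k blocks), offset = 3072 and
 * len = 2048, block = 0 and the last byte touched is 5119, so
 * max_blocks = (8192 >> 12) - 0 = 2, whereas len >> blkbits would be 0.
 */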
a2df2a63 | 3460 | max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) |
fd28784a | 3461 | - block; |
a2df2a63 | 3462 | /* |
f3bd1f3f | 3463 | * credits to insert 1 extent into extent tree |
a2df2a63 | 3464 | */ |
f3bd1f3f | 3465 | credits = ext4_chunk_trans_blocks(inode, max_blocks); |
55bd725a | 3466 | mutex_lock(&inode->i_mutex); |
a2df2a63 AA |
3467 | retry: |
3468 | while (ret >= 0 && ret < max_blocks) { | |
3469 | block = block + ret; | |
3470 | max_blocks = max_blocks - ret; | |
3471 | handle = ext4_journal_start(inode, credits); | |
3472 | if (IS_ERR(handle)) { | |
3473 | ret = PTR_ERR(handle); | |
3474 | break; | |
3475 | } | |
79ffab34 | 3476 | map_bh.b_state = 0; |
12b7ac17 TT |
3477 | ret = ext4_get_blocks(handle, inode, block, |
3478 | max_blocks, &map_bh, | |
c2177057 | 3479 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); |
221879c9 | 3480 | if (ret <= 0) { |
2c98615d AK |
3481 | #ifdef EXT4FS_DEBUG |
3482 | WARN_ON(ret <= 0); | |
3483 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | |
3484 | "returned error inode#%lu, block=%u, " | |
9fd9784c | 3485 | "max_blocks=%u", __func__, |
221879c9 | 3486 | inode->i_ino, block, max_blocks); |
2c98615d | 3487 | #endif |
a2df2a63 AA |
3488 | ext4_mark_inode_dirty(handle, inode); |
3489 | ret2 = ext4_journal_stop(handle); | |
3490 | break; | |
3491 | } | |
fd28784a AK |
3492 | if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len, |
3493 | blkbits) >> blkbits)) | |
3494 | new_size = offset + len; | |
3495 | else | |
3496 | new_size = (block + ret) << blkbits; | |
a2df2a63 | 3497 | |
fd28784a AK |
3498 | ext4_falloc_update_inode(inode, mode, new_size, |
3499 | buffer_new(&map_bh)); | |
a2df2a63 AA |
3500 | ext4_mark_inode_dirty(handle, inode); |
3501 | ret2 = ext4_journal_stop(handle); | |
3502 | if (ret2) | |
3503 | break; | |
3504 | } | |
fd28784a AK |
3505 | if (ret == -ENOSPC && |
3506 | ext4_should_retry_alloc(inode->i_sb, &retries)) { | |
3507 | ret = 0; | |
a2df2a63 | 3508 | goto retry; |
a2df2a63 | 3509 | } |
55bd725a | 3510 | mutex_unlock(&inode->i_mutex); |
a2df2a63 AA |
3511 | return ret > 0 ? ret2 : ret; |
3512 | } | |
6873fa0d | 3513 | |
0031462b MC |
3514 | /* |
3515 | * This function converts a range of blocks to written extents. | 
3516 | * The caller passes the start offset and the size; | 
3517 | * all unwritten extents within this range will be converted to | 
3518 | * written extents. | 
3519 | * | 
3520 | * This function is called from the direct IO end_io callback | 
3521 | * function, to convert the fallocated extents after the IO is completed. | 
3522 | */ | |
3523 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | |
3524 | loff_t len) | |
3525 | { | |
3526 | handle_t *handle; | |
3527 | ext4_lblk_t block; | |
3528 | unsigned int max_blocks; | |
3529 | int ret = 0; | |
3530 | int ret2 = 0; | |
3531 | struct buffer_head map_bh; | |
3532 | unsigned int credits, blkbits = inode->i_blkbits; | |
3533 | ||
3534 | block = offset >> blkbits; | |
3535 | /* | |
3536 | * We can't just convert len to max_blocks because the range | 
3537 | * may straddle block boundaries, e.g. blocksize = 4096, offset = 3072, len = 2048. | 
3538 | */ | |
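/*
 * Same rounding as in ext4_fallocate() above: e.g. with 4k blocks,
 * offset = 3072 and len = 2048 span blocks 0 and 1, so max_blocks = 2.
 */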
3539 | max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) | |
3540 | - block; | |
3541 | /* | |
3542 | * credits to insert 1 extent into extent tree | |
3543 | */ | |
3544 | credits = ext4_chunk_trans_blocks(inode, max_blocks); | |
3545 | while (ret >= 0 && ret < max_blocks) { | |
3546 | block = block + ret; | |
3547 | max_blocks = max_blocks - ret; | |
3548 | handle = ext4_journal_start(inode, credits); | |
3549 | if (IS_ERR(handle)) { | |
3550 | ret = PTR_ERR(handle); | |
3551 | break; | |
3552 | } | |
3553 | map_bh.b_state = 0; | |
3554 | ret = ext4_get_blocks(handle, inode, block, | |
3555 | max_blocks, &map_bh, | |
3556 | EXT4_GET_BLOCKS_DIO_CONVERT_EXT); | |
3557 | if (ret <= 0) { | |
3558 | WARN_ON(ret <= 0); | |
3559 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | |
3560 | "returned error inode#%lu, block=%u, " | |
3561 | "max_blocks=%u", __func__, | |
3562 | inode->i_ino, block, max_blocks); | |
3563 | } | |
3564 | ext4_mark_inode_dirty(handle, inode); | |
3565 | ret2 = ext4_journal_stop(handle); | |
3566 | if (ret <= 0 || ret2) | 
3567 | break; | |
3568 | } | |
3569 | return ret > 0 ? ret2 : ret; | |
3570 | } | |
6873fa0d ES |
3571 | /* |
3572 | * Callback function called for each extent to gather FIEMAP information. | |
3573 | */ | |
3a06d778 | 3574 | static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, |
6873fa0d ES |
3575 | struct ext4_ext_cache *newex, struct ext4_extent *ex, |
3576 | void *data) | |
3577 | { | |
3578 | struct fiemap_extent_info *fieinfo = data; | |
c9877b20 | 3579 | unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; |
6873fa0d ES |
3580 | __u64 logical; |
3581 | __u64 physical; | |
3582 | __u64 length; | |
3583 | __u32 flags = 0; | |
3584 | int error; | |
3585 | ||
3586 | logical = (__u64)newex->ec_block << blksize_bits; | |
3587 | ||
3588 | if (newex->ec_type == EXT4_EXT_CACHE_GAP) { | |
3589 | pgoff_t offset; | |
3590 | struct page *page; | |
3591 | struct buffer_head *bh = NULL; | |
3592 | ||
3593 | offset = logical >> PAGE_SHIFT; | |
3594 | page = find_get_page(inode->i_mapping, offset); | |
3595 | if (!page || !page_has_buffers(page)) | |
3596 | return EXT_CONTINUE; | |
3597 | ||
3598 | bh = page_buffers(page); | |
3599 | ||
3600 | if (!bh) | |
3601 | return EXT_CONTINUE; | |
3602 | ||
3603 | if (buffer_delay(bh)) { | |
3604 | flags |= FIEMAP_EXTENT_DELALLOC; | |
3605 | page_cache_release(page); | |
3606 | } else { | |
3607 | page_cache_release(page); | |
3608 | return EXT_CONTINUE; | |
3609 | } | |
3610 | } | |
3611 | ||
3612 | physical = (__u64)newex->ec_start << blksize_bits; | |
3613 | length = (__u64)newex->ec_len << blksize_bits; | |
3614 | ||
3615 | if (ex && ext4_ext_is_uninitialized(ex)) | |
3616 | flags |= FIEMAP_EXTENT_UNWRITTEN; | |
3617 | ||
3618 | /* | |
3619 | * If this extent reaches EXT_MAX_BLOCK, it must be last. | |
3620 | * | |
3621 | * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK, | |
3622 | * this also indicates no more allocated blocks. | |
3623 | * | |
3624 | * XXX this might miss a single-block extent at EXT_MAX_BLOCK | |
3625 | */ | |
c9877b20 | 3626 | if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK || |
eefd7f03 TT |
3627 | newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) { |
3628 | loff_t size = i_size_read(inode); | |
3629 | loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb); | |
3630 | ||
6873fa0d | 3631 | flags |= FIEMAP_EXTENT_LAST; |
eefd7f03 TT |
3632 | if ((flags & FIEMAP_EXTENT_DELALLOC) && |
3633 | logical+length > size) | |
3634 | length = (size - logical + bs - 1) & ~(bs-1); | |
3635 | } | |
6873fa0d ES |
3636 | |
3637 | error = fiemap_fill_next_extent(fieinfo, logical, physical, | |
3638 | length, flags); | |
3639 | if (error < 0) | |
3640 | return error; | |
3641 | if (error == 1) | |
3642 | return EXT_BREAK; | |
3643 | ||
3644 | return EXT_CONTINUE; | |
3645 | } | |
3646 | ||
3647 | /* fiemap flags we can handle specified here */ | |
3648 | #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) | |
3649 | ||
3a06d778 AK |
3650 | static int ext4_xattr_fiemap(struct inode *inode, |
3651 | struct fiemap_extent_info *fieinfo) | |
6873fa0d ES |
3652 | { |
3653 | __u64 physical = 0; | |
3654 | __u64 length; | |
3655 | __u32 flags = FIEMAP_EXTENT_LAST; | |
3656 | int blockbits = inode->i_sb->s_blocksize_bits; | |
3657 | int error = 0; | |
3658 | ||
3659 | /* in-inode? */ | |
3660 | if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { | |
3661 | struct ext4_iloc iloc; | |
3662 | int offset; /* offset of xattr in inode */ | |
3663 | ||
3664 | error = ext4_get_inode_loc(inode, &iloc); | |
3665 | if (error) | |
3666 | return error; | |
3667 | physical = iloc.bh->b_blocknr << blockbits; | |
3668 | offset = EXT4_GOOD_OLD_INODE_SIZE + | |
3669 | EXT4_I(inode)->i_extra_isize; | |
3670 | physical += offset; | |
3671 | length = EXT4_SB(inode->i_sb)->s_inode_size - offset; | |
3672 | flags |= FIEMAP_EXTENT_DATA_INLINE; | |
3673 | } else { /* external block */ | |
3674 | physical = EXT4_I(inode)->i_file_acl << blockbits; | |
3675 | length = inode->i_sb->s_blocksize; | |
3676 | } | |
3677 | ||
3678 | if (physical) | |
3679 | error = fiemap_fill_next_extent(fieinfo, 0, physical, | |
3680 | length, flags); | |
3681 | return (error < 0 ? error : 0); | |
3682 | } | |
3683 | ||
3684 | int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |
3685 | __u64 start, __u64 len) | |
3686 | { | |
3687 | ext4_lblk_t start_blk; | |
3688 | ext4_lblk_t len_blks; | |
3689 | int error = 0; | |
3690 | ||
3691 | /* fallback to generic here if not in extents fmt */ | |
3692 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | |
3693 | return generic_block_fiemap(inode, fieinfo, start, len, | |
3694 | ext4_get_block); | |
3695 | ||
3696 | if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) | |
3697 | return -EBADR; | |
3698 | ||
3699 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { | |
3700 | error = ext4_xattr_fiemap(inode, fieinfo); | |
3701 | } else { | |
3702 | start_blk = start >> inode->i_sb->s_blocksize_bits; | |
3703 | len_blks = len >> inode->i_sb->s_blocksize_bits; | |
3704 | ||
3705 | /* | |
3706 | * Walk the extent tree gathering extent information. | |
3707 | * ext4_ext_fiemap_cb will push extents back to user. | |
3708 | */ | |
0568c518 | 3709 | down_read(&EXT4_I(inode)->i_data_sem); |
6873fa0d ES |
3710 | error = ext4_ext_walk_space(inode, start_blk, len_blks, |
3711 | ext4_ext_fiemap_cb, fieinfo); | |
0568c518 | 3712 | up_read(&EXT4_I(inode)->i_data_sem); |
6873fa0d ES |
3713 | } |
3714 | ||
3715 | return error; | |
3716 | } | |
3717 |