Btrfs: Deal with failed writes in mirrored configurations
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <asm/div64.h>
24 #include "ctree.h"
25 #include "extent_map.h"
26 #include "disk-io.h"
27 #include "transaction.h"
28 #include "print-tree.h"
29 #include "volumes.h"
30
31 struct map_lookup {
32 u64 type;
33 int io_align;
34 int io_width;
35 int stripe_len;
36 int sector_size;
37 int num_stripes;
38 int sub_stripes;
39 struct btrfs_bio_stripe stripes[];
40 };
41
42 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
43 (sizeof(struct btrfs_bio_stripe) * (n)))
44
45 static DEFINE_MUTEX(uuid_mutex);
46 static LIST_HEAD(fs_uuids);
47
48 int btrfs_cleanup_fs_uuids(void)
49 {
50 struct btrfs_fs_devices *fs_devices;
51 struct list_head *uuid_cur;
52 struct list_head *devices_cur;
53 struct btrfs_device *dev;
54
55 list_for_each(uuid_cur, &fs_uuids) {
56 fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
57 list);
58 while(!list_empty(&fs_devices->devices)) {
59 devices_cur = fs_devices->devices.next;
60 dev = list_entry(devices_cur, struct btrfs_device,
61 dev_list);
62 if (dev->bdev) {
63 close_bdev_excl(dev->bdev);
64 }
65 list_del(&dev->dev_list);
66 kfree(dev);
67 }
68 }
69 return 0;
70 }
71
72 static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
73 u8 *uuid)
74 {
75 struct btrfs_device *dev;
76 struct list_head *cur;
77
78 list_for_each(cur, head) {
79 dev = list_entry(cur, struct btrfs_device, dev_list);
80 if (dev->devid == devid &&
81 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
82 return dev;
83 }
84 }
85 return NULL;
86 }
87
88 static struct btrfs_fs_devices *find_fsid(u8 *fsid)
89 {
90 struct list_head *cur;
91 struct btrfs_fs_devices *fs_devices;
92
93 list_for_each(cur, &fs_uuids) {
94 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
95 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
96 return fs_devices;
97 }
98 return NULL;
99 }
100
101 static int device_list_add(const char *path,
102 struct btrfs_super_block *disk_super,
103 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
104 {
105 struct btrfs_device *device;
106 struct btrfs_fs_devices *fs_devices;
107 u64 found_transid = btrfs_super_generation(disk_super);
108
109 fs_devices = find_fsid(disk_super->fsid);
110 if (!fs_devices) {
111 fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
112 if (!fs_devices)
113 return -ENOMEM;
114 INIT_LIST_HEAD(&fs_devices->devices);
115 INIT_LIST_HEAD(&fs_devices->alloc_list);
116 list_add(&fs_devices->list, &fs_uuids);
117 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
118 fs_devices->latest_devid = devid;
119 fs_devices->latest_trans = found_transid;
120 fs_devices->lowest_devid = (u64)-1;
121 fs_devices->num_devices = 0;
122 device = NULL;
123 } else {
124 device = __find_device(&fs_devices->devices, devid,
125 disk_super->dev_item.uuid);
126 }
127 if (!device) {
128 device = kzalloc(sizeof(*device), GFP_NOFS);
129 if (!device) {
130 /* we can safely leave the fs_devices entry around */
131 return -ENOMEM;
132 }
133 device->devid = devid;
134 memcpy(device->uuid, disk_super->dev_item.uuid,
135 BTRFS_UUID_SIZE);
136 device->barriers = 1;
137 spin_lock_init(&device->io_lock);
138 device->name = kstrdup(path, GFP_NOFS);
139 if (!device->name) {
140 kfree(device);
141 return -ENOMEM;
142 }
143 list_add(&device->dev_list, &fs_devices->devices);
144 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
145 fs_devices->num_devices++;
146 }
147
148 if (found_transid > fs_devices->latest_trans) {
149 fs_devices->latest_devid = devid;
150 fs_devices->latest_trans = found_transid;
151 }
152 if (fs_devices->lowest_devid > devid) {
153 fs_devices->lowest_devid = devid;
154 }
155 *fs_devices_ret = fs_devices;
156 return 0;
157 }
158
159 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
160 {
161 struct list_head *head = &fs_devices->devices;
162 struct list_head *cur;
163 struct btrfs_device *device;
164
165 mutex_lock(&uuid_mutex);
166 list_for_each(cur, head) {
167 device = list_entry(cur, struct btrfs_device, dev_list);
168 if (device->bdev) {
169 close_bdev_excl(device->bdev);
170 }
171 device->bdev = NULL;
172 }
173 mutex_unlock(&uuid_mutex);
174 return 0;
175 }
176
177 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
178 int flags, void *holder)
179 {
180 struct block_device *bdev;
181 struct list_head *head = &fs_devices->devices;
182 struct list_head *cur;
183 struct btrfs_device *device;
184 int ret;
185
186 mutex_lock(&uuid_mutex);
187 list_for_each(cur, head) {
188 device = list_entry(cur, struct btrfs_device, dev_list);
189 bdev = open_bdev_excl(device->name, flags, holder);
190
191 if (IS_ERR(bdev)) {
192 printk("open %s failed\n", device->name);
193 ret = PTR_ERR(bdev);
194 goto fail;
195 }
196 if (device->devid == fs_devices->latest_devid)
197 fs_devices->latest_bdev = bdev;
198 if (device->devid == fs_devices->lowest_devid) {
199 fs_devices->lowest_bdev = bdev;
200 }
201 device->bdev = bdev;
202 }
203 mutex_unlock(&uuid_mutex);
204 return 0;
205 fail:
206 mutex_unlock(&uuid_mutex);
207 btrfs_close_devices(fs_devices);
208 return ret;
209 }
210
211 int btrfs_scan_one_device(const char *path, int flags, void *holder,
212 struct btrfs_fs_devices **fs_devices_ret)
213 {
214 struct btrfs_super_block *disk_super;
215 struct block_device *bdev;
216 struct buffer_head *bh;
217 int ret;
218 u64 devid;
219 u64 transid;
220
221 mutex_lock(&uuid_mutex);
222
223 bdev = open_bdev_excl(path, flags, holder);
224
225 if (IS_ERR(bdev)) {
226 ret = PTR_ERR(bdev);
227 goto error;
228 }
229
230 ret = set_blocksize(bdev, 4096);
231 if (ret)
232 goto error_close;
233 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
234 if (!bh) {
235 ret = -EIO;
236 goto error_close;
237 }
238 disk_super = (struct btrfs_super_block *)bh->b_data;
239 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
240 sizeof(disk_super->magic))) {
241 ret = -EINVAL;
242 goto error_brelse;
243 }
244 devid = le64_to_cpu(disk_super->dev_item.devid);
245 transid = btrfs_super_generation(disk_super);
246 if (disk_super->label[0])
247 printk("device label %s ", disk_super->label);
248 else {
249 		/* FIXME, make a real uuid parser */
250 printk("device fsid %llx-%llx ",
251 *(unsigned long long *)disk_super->fsid,
252 *(unsigned long long *)(disk_super->fsid + 8));
253 }
254 printk("devid %Lu transid %Lu %s\n", devid, transid, path);
255 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
256
257 error_brelse:
258 brelse(bh);
259 error_close:
260 close_bdev_excl(bdev);
261 error:
262 mutex_unlock(&uuid_mutex);
263 return ret;
264 }
265
266 /*
267 * this uses a pretty simple search, the expectation is that it is
268 * called very infrequently and that a given device has a small number
269 * of extents
270 */
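/*
 * For example, if a device already has extents covering [1MB, 5MB) and
 * [9MB, 12MB), a request for 3MB of free space is satisfied by the 4MB
 * hole that starts at 5MB, so *start comes back as 5MB.
 */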
271 static int find_free_dev_extent(struct btrfs_trans_handle *trans,
272 struct btrfs_device *device,
273 struct btrfs_path *path,
274 u64 num_bytes, u64 *start)
275 {
276 struct btrfs_key key;
277 struct btrfs_root *root = device->dev_root;
278 struct btrfs_dev_extent *dev_extent = NULL;
279 u64 hole_size = 0;
280 u64 last_byte = 0;
281 u64 search_start = 0;
282 u64 search_end = device->total_bytes;
283 int ret;
284 int slot = 0;
285 int start_found;
286 struct extent_buffer *l;
287
288 start_found = 0;
289 path->reada = 2;
290
291 /* FIXME use last free of some kind */
292
293 /* we don't want to overwrite the superblock on the drive,
294 * so we make sure to start at an offset of at least 1MB
295 */
296 search_start = max((u64)1024 * 1024, search_start);
297
298 if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
299 search_start = max(root->fs_info->alloc_start, search_start);
300
301 key.objectid = device->devid;
302 key.offset = search_start;
303 key.type = BTRFS_DEV_EXTENT_KEY;
304 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
305 if (ret < 0)
306 goto error;
307 ret = btrfs_previous_item(root, path, 0, key.type);
308 if (ret < 0)
309 goto error;
310 l = path->nodes[0];
311 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
312 while (1) {
313 l = path->nodes[0];
314 slot = path->slots[0];
315 if (slot >= btrfs_header_nritems(l)) {
316 ret = btrfs_next_leaf(root, path);
317 if (ret == 0)
318 continue;
319 if (ret < 0)
320 goto error;
321 no_more_items:
322 if (!start_found) {
323 if (search_start >= search_end) {
324 ret = -ENOSPC;
325 goto error;
326 }
327 *start = search_start;
328 start_found = 1;
329 goto check_pending;
330 }
331 *start = last_byte > search_start ?
332 last_byte : search_start;
333 if (search_end <= *start) {
334 ret = -ENOSPC;
335 goto error;
336 }
337 goto check_pending;
338 }
339 btrfs_item_key_to_cpu(l, &key, slot);
340
341 if (key.objectid < device->devid)
342 goto next;
343
344 if (key.objectid > device->devid)
345 goto no_more_items;
346
347 if (key.offset >= search_start && key.offset > last_byte &&
348 start_found) {
349 if (last_byte < search_start)
350 last_byte = search_start;
351 hole_size = key.offset - last_byte;
352 if (key.offset > last_byte &&
353 hole_size >= num_bytes) {
354 *start = last_byte;
355 goto check_pending;
356 }
357 }
358 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
359 goto next;
360 }
361
362 start_found = 1;
363 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
364 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
365 next:
366 path->slots[0]++;
367 cond_resched();
368 }
369 check_pending:
370 /* we have to make sure we didn't find an extent that has already
371 * been allocated by the map tree or the original allocation
372 */
373 btrfs_release_path(root, path);
374 BUG_ON(*start < search_start);
375
376 if (*start + num_bytes > search_end) {
377 ret = -ENOSPC;
378 goto error;
379 }
380 /* check for pending inserts here */
381 return 0;
382
383 error:
384 btrfs_release_path(root, path);
385 return ret;
386 }
387
388 int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
389 struct btrfs_device *device,
390 u64 start)
391 {
392 int ret;
393 struct btrfs_path *path;
394 struct btrfs_root *root = device->dev_root;
395 struct btrfs_key key;
396
397 path = btrfs_alloc_path();
398 if (!path)
399 return -ENOMEM;
400
401 key.objectid = device->devid;
402 key.offset = start;
403 key.type = BTRFS_DEV_EXTENT_KEY;
404
405 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
406 BUG_ON(ret);
407
408 ret = btrfs_del_item(trans, root, path);
409 BUG_ON(ret);
410
411 btrfs_free_path(path);
412 return ret;
413 }
414
415 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
416 struct btrfs_device *device,
417 u64 chunk_tree, u64 chunk_objectid,
418 u64 chunk_offset,
419 u64 num_bytes, u64 *start)
420 {
421 int ret;
422 struct btrfs_path *path;
423 struct btrfs_root *root = device->dev_root;
424 struct btrfs_dev_extent *extent;
425 struct extent_buffer *leaf;
426 struct btrfs_key key;
427
428 path = btrfs_alloc_path();
429 if (!path)
430 return -ENOMEM;
431
432 ret = find_free_dev_extent(trans, device, path, num_bytes, start);
433 if (ret) {
434 goto err;
435 }
436
437 key.objectid = device->devid;
438 key.offset = *start;
439 key.type = BTRFS_DEV_EXTENT_KEY;
440 ret = btrfs_insert_empty_item(trans, root, path, &key,
441 sizeof(*extent));
442 BUG_ON(ret);
443
444 leaf = path->nodes[0];
445 extent = btrfs_item_ptr(leaf, path->slots[0],
446 struct btrfs_dev_extent);
447 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
448 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
449 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
450
451 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
452 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
453 BTRFS_UUID_SIZE);
454
455 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
456 btrfs_mark_buffer_dirty(leaf);
457 err:
458 btrfs_free_path(path);
459 return ret;
460 }
461
462 static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
463 {
464 struct btrfs_path *path;
465 int ret;
466 struct btrfs_key key;
467 struct btrfs_chunk *chunk;
468 struct btrfs_key found_key;
469
470 path = btrfs_alloc_path();
471 BUG_ON(!path);
472
473 key.objectid = objectid;
474 key.offset = (u64)-1;
475 key.type = BTRFS_CHUNK_ITEM_KEY;
476
477 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
478 if (ret < 0)
479 goto error;
480
481 BUG_ON(ret == 0);
482
483 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
484 if (ret) {
485 *offset = 0;
486 } else {
487 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
488 path->slots[0]);
489 if (found_key.objectid != objectid)
490 *offset = 0;
491 else {
492 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
493 struct btrfs_chunk);
494 *offset = found_key.offset +
495 btrfs_chunk_length(path->nodes[0], chunk);
496 }
497 }
498 ret = 0;
499 error:
500 btrfs_free_path(path);
501 return ret;
502 }
503
504 static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
505 u64 *objectid)
506 {
507 int ret;
508 struct btrfs_key key;
509 struct btrfs_key found_key;
510
511 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
512 key.type = BTRFS_DEV_ITEM_KEY;
513 key.offset = (u64)-1;
514
515 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
516 if (ret < 0)
517 goto error;
518
519 BUG_ON(ret == 0);
520
521 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
522 BTRFS_DEV_ITEM_KEY);
523 if (ret) {
524 *objectid = 1;
525 } else {
526 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
527 path->slots[0]);
528 *objectid = found_key.offset + 1;
529 }
530 ret = 0;
531 error:
532 btrfs_release_path(root, path);
533 return ret;
534 }
535
536 /*
537 * the device information is stored in the chunk root
538 * the btrfs_device struct should be fully filled in
539 */
540 int btrfs_add_device(struct btrfs_trans_handle *trans,
541 struct btrfs_root *root,
542 struct btrfs_device *device)
543 {
544 int ret;
545 struct btrfs_path *path;
546 struct btrfs_dev_item *dev_item;
547 struct extent_buffer *leaf;
548 struct btrfs_key key;
549 unsigned long ptr;
550 u64 free_devid;
551
552 root = root->fs_info->chunk_root;
553
554 path = btrfs_alloc_path();
555 if (!path)
556 return -ENOMEM;
557
558 ret = find_next_devid(root, path, &free_devid);
559 if (ret)
560 goto out;
561
562 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
563 key.type = BTRFS_DEV_ITEM_KEY;
564 key.offset = free_devid;
565
566 ret = btrfs_insert_empty_item(trans, root, path, &key,
567 sizeof(*dev_item));
568 if (ret)
569 goto out;
570
571 leaf = path->nodes[0];
572 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
573
574 device->devid = free_devid;
575 btrfs_set_device_id(leaf, dev_item, device->devid);
576 btrfs_set_device_type(leaf, dev_item, device->type);
577 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
578 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
579 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
580 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
581 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
582 btrfs_set_device_group(leaf, dev_item, 0);
583 btrfs_set_device_seek_speed(leaf, dev_item, 0);
584 btrfs_set_device_bandwidth(leaf, dev_item, 0);
585
586 ptr = (unsigned long)btrfs_device_uuid(dev_item);
587 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
588 btrfs_mark_buffer_dirty(leaf);
589 ret = 0;
590
591 out:
592 btrfs_free_path(path);
593 return ret;
594 }
595
596 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
597 {
598 struct btrfs_trans_handle *trans;
599 struct btrfs_device *device;
600 struct block_device *bdev;
601 struct list_head *cur;
602 struct list_head *devices;
603 u64 total_bytes;
604 int ret = 0;
605
606
607 bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
608 	if (IS_ERR(bdev)) {
609 		return PTR_ERR(bdev);
610 	}
611 mutex_lock(&root->fs_info->fs_mutex);
612 trans = btrfs_start_transaction(root, 1);
613 devices = &root->fs_info->fs_devices->devices;
614 list_for_each(cur, devices) {
615 device = list_entry(cur, struct btrfs_device, dev_list);
616 if (device->bdev == bdev) {
617 ret = -EEXIST;
618 goto out;
619 }
620 }
621
622 device = kzalloc(sizeof(*device), GFP_NOFS);
623 if (!device) {
624 /* we can safely leave the fs_devices entry around */
625 ret = -ENOMEM;
626 goto out_close_bdev;
627 }
628
629 device->barriers = 1;
630 generate_random_uuid(device->uuid);
631 spin_lock_init(&device->io_lock);
632 device->name = kstrdup(device_path, GFP_NOFS);
633 if (!device->name) {
634 		kfree(device);
		ret = -ENOMEM;
635 goto out_close_bdev;
636 }
637 device->io_width = root->sectorsize;
638 device->io_align = root->sectorsize;
639 device->sector_size = root->sectorsize;
640 device->total_bytes = i_size_read(bdev->bd_inode);
641 device->dev_root = root->fs_info->dev_root;
642 device->bdev = bdev;
643
644 ret = btrfs_add_device(trans, root, device);
645 if (ret)
646 goto out_close_bdev;
647
648 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
649 btrfs_set_super_total_bytes(&root->fs_info->super_copy,
650 total_bytes + device->total_bytes);
651
652 total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
653 btrfs_set_super_num_devices(&root->fs_info->super_copy,
654 total_bytes + 1);
655
656 list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
657 list_add(&device->dev_alloc_list,
658 &root->fs_info->fs_devices->alloc_list);
659 root->fs_info->fs_devices->num_devices++;
660 out:
661 btrfs_end_transaction(trans, root);
662 mutex_unlock(&root->fs_info->fs_mutex);
663 return ret;
664
665 out_close_bdev:
666 close_bdev_excl(bdev);
667 goto out;
668 }
669
670 int btrfs_update_device(struct btrfs_trans_handle *trans,
671 struct btrfs_device *device)
672 {
673 int ret;
674 struct btrfs_path *path;
675 struct btrfs_root *root;
676 struct btrfs_dev_item *dev_item;
677 struct extent_buffer *leaf;
678 struct btrfs_key key;
679
680 root = device->dev_root->fs_info->chunk_root;
681
682 path = btrfs_alloc_path();
683 if (!path)
684 return -ENOMEM;
685
686 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
687 key.type = BTRFS_DEV_ITEM_KEY;
688 key.offset = device->devid;
689
690 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
691 if (ret < 0)
692 goto out;
693
694 if (ret > 0) {
695 ret = -ENOENT;
696 goto out;
697 }
698
699 leaf = path->nodes[0];
700 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
701
702 btrfs_set_device_id(leaf, dev_item, device->devid);
703 btrfs_set_device_type(leaf, dev_item, device->type);
704 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
705 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
706 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
707 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
708 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
709 btrfs_mark_buffer_dirty(leaf);
710
711 out:
712 btrfs_free_path(path);
713 return ret;
714 }
715
716 int btrfs_grow_device(struct btrfs_trans_handle *trans,
717 struct btrfs_device *device, u64 new_size)
718 {
719 struct btrfs_super_block *super_copy =
720 &device->dev_root->fs_info->super_copy;
721 u64 old_total = btrfs_super_total_bytes(super_copy);
722 u64 diff = new_size - device->total_bytes;
723
724 btrfs_set_super_total_bytes(super_copy, old_total + diff);
725 return btrfs_update_device(trans, device);
726 }
727
728 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
729 struct btrfs_root *root,
730 u64 chunk_tree, u64 chunk_objectid,
731 u64 chunk_offset)
732 {
733 int ret;
734 struct btrfs_path *path;
735 struct btrfs_key key;
736
737 root = root->fs_info->chunk_root;
738 path = btrfs_alloc_path();
739 if (!path)
740 return -ENOMEM;
741
742 key.objectid = chunk_objectid;
743 key.offset = chunk_offset;
744 key.type = BTRFS_CHUNK_ITEM_KEY;
745
746 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
747 BUG_ON(ret);
748
749 ret = btrfs_del_item(trans, root, path);
750 BUG_ON(ret);
751
752 btrfs_free_path(path);
753 return 0;
754 }
755
756 int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
757 chunk_offset)
758 {
759 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
760 struct btrfs_disk_key *disk_key;
761 struct btrfs_chunk *chunk;
762 u8 *ptr;
763 int ret = 0;
764 u32 num_stripes;
765 u32 array_size;
766 u32 len = 0;
767 u32 cur;
768 struct btrfs_key key;
769
770 array_size = btrfs_super_sys_array_size(super_copy);
771
772 ptr = super_copy->sys_chunk_array;
773 cur = 0;
774
775 while (cur < array_size) {
776 disk_key = (struct btrfs_disk_key *)ptr;
777 btrfs_disk_key_to_cpu(&key, disk_key);
778
779 len = sizeof(*disk_key);
780
781 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
782 chunk = (struct btrfs_chunk *)(ptr + len);
783 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
784 len += btrfs_chunk_item_size(num_stripes);
785 } else {
786 ret = -EIO;
787 break;
788 }
789 if (key.objectid == chunk_objectid &&
790 key.offset == chunk_offset) {
791 memmove(ptr, ptr + len, array_size - (cur + len));
792 array_size -= len;
793 btrfs_set_super_sys_array_size(super_copy, array_size);
794 } else {
795 ptr += len;
796 cur += len;
797 }
798 }
799 return ret;
800 }
801
802
803 int btrfs_relocate_chunk(struct btrfs_root *root,
804 u64 chunk_tree, u64 chunk_objectid,
805 u64 chunk_offset)
806 {
807 struct extent_map_tree *em_tree;
808 struct btrfs_root *extent_root;
809 struct btrfs_trans_handle *trans;
810 struct extent_map *em;
811 struct map_lookup *map;
812 int ret;
813 int i;
814
815 root = root->fs_info->chunk_root;
816 extent_root = root->fs_info->extent_root;
817 em_tree = &root->fs_info->mapping_tree.map_tree;
818
819 /* step one, relocate all the extents inside this chunk */
820 ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
821 BUG_ON(ret);
822
823 trans = btrfs_start_transaction(root, 1);
824 BUG_ON(!trans);
825
826 /*
827 * step two, delete the device extents and the
828 * chunk tree entries
829 */
830 spin_lock(&em_tree->lock);
831 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
832 spin_unlock(&em_tree->lock);
833
834 BUG_ON(em->start > chunk_offset || em->start + em->len < chunk_offset);
835 map = (struct map_lookup *)em->bdev;
836
837 for (i = 0; i < map->num_stripes; i++) {
838 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
839 map->stripes[i].physical);
840 BUG_ON(ret);
841 }
842 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
843 chunk_offset);
844
845 BUG_ON(ret);
846
847 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
848 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
849 BUG_ON(ret);
850 goto out;
851 }
852
853
854
855 spin_lock(&em_tree->lock);
856 remove_extent_mapping(em_tree, em);
857 kfree(map);
858 em->bdev = NULL;
859
860 /* once for the tree */
861 free_extent_map(em);
862 spin_unlock(&em_tree->lock);
863
864 out:
865 /* once for us */
866 free_extent_map(em);
867
868 btrfs_end_transaction(trans, root);
869 return 0;
870 }
871
872 static u64 div_factor(u64 num, int factor)
873 {
874 if (factor == 10)
875 return num;
876 num *= factor;
877 do_div(num, 10);
878 return num;
879 }
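/*
 * div_factor() works in tenths: div_factor(n, 10) is n itself and
 * div_factor(n, 1) is roughly 10% of n, so div_factor(1000, 3) == 300.
 */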
880
881
882 int btrfs_balance(struct btrfs_root *dev_root)
883 {
884 int ret;
885 struct list_head *cur;
886 struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
887 struct btrfs_device *device;
888 u64 old_size;
889 u64 size_to_free;
890 struct btrfs_path *path;
891 struct btrfs_key key;
892 struct btrfs_chunk *chunk;
893 struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
894 struct btrfs_trans_handle *trans;
895 struct btrfs_key found_key;
896
897
898 dev_root = dev_root->fs_info->dev_root;
899
900 mutex_lock(&dev_root->fs_info->fs_mutex);
901 /* step one make some room on all the devices */
902 list_for_each(cur, devices) {
903 device = list_entry(cur, struct btrfs_device, dev_list);
904 old_size = device->total_bytes;
905 size_to_free = div_factor(old_size, 1);
906 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
907 if (device->total_bytes - device->bytes_used > size_to_free)
908 continue;
909
910 ret = btrfs_shrink_device(device, old_size - size_to_free);
911 BUG_ON(ret);
912
913 trans = btrfs_start_transaction(dev_root, 1);
914 BUG_ON(!trans);
915
916 ret = btrfs_grow_device(trans, device, old_size);
917 BUG_ON(ret);
918
919 btrfs_end_transaction(trans, dev_root);
920 }
921
922 /* step two, relocate all the chunks */
923 path = btrfs_alloc_path();
924 BUG_ON(!path);
925
926 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
927 key.offset = (u64)-1;
928 key.type = BTRFS_CHUNK_ITEM_KEY;
929
930 while(1) {
931 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
932 if (ret < 0)
933 goto error;
934
935 /*
936 * this shouldn't happen, it means the last relocate
937 * failed
938 */
939 if (ret == 0)
940 break;
941
942 ret = btrfs_previous_item(chunk_root, path, 0,
943 BTRFS_CHUNK_ITEM_KEY);
944 if (ret) {
945 break;
946 }
947 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
948 path->slots[0]);
949 if (found_key.objectid != key.objectid)
950 break;
951 chunk = btrfs_item_ptr(path->nodes[0],
952 path->slots[0],
953 struct btrfs_chunk);
954 key.offset = found_key.offset;
955 /* chunk zero is special */
956 if (key.offset == 0)
957 break;
958
959 ret = btrfs_relocate_chunk(chunk_root,
960 chunk_root->root_key.objectid,
961 found_key.objectid,
962 found_key.offset);
963 BUG_ON(ret);
964 btrfs_release_path(chunk_root, path);
965 }
966 ret = 0;
967 error:
968 btrfs_free_path(path);
969 mutex_unlock(&dev_root->fs_info->fs_mutex);
970 return ret;
971 }
972
973 /*
974 * shrinking a device means finding all of the device extents past
975 * the new size, and then following the back refs to the chunks.
976 * The chunk relocation code actually frees the device extent
977 */
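/*
 * For example, shrinking a 100GB device to 60GB repeatedly finds the
 * device extent closest to the end of the device; any extent whose
 * offset plus length crosses the 60GB boundary has its owning chunk
 * relocated, which frees that device extent, until nothing extends
 * past the new size.
 */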
978 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
979 {
980 struct btrfs_trans_handle *trans;
981 struct btrfs_root *root = device->dev_root;
982 struct btrfs_dev_extent *dev_extent = NULL;
983 struct btrfs_path *path;
984 u64 length;
985 u64 chunk_tree;
986 u64 chunk_objectid;
987 u64 chunk_offset;
988 int ret;
989 int slot;
990 struct extent_buffer *l;
991 struct btrfs_key key;
992 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
993 u64 old_total = btrfs_super_total_bytes(super_copy);
994 u64 diff = device->total_bytes - new_size;
995
996
997 path = btrfs_alloc_path();
998 if (!path)
999 return -ENOMEM;
1000
1001 trans = btrfs_start_transaction(root, 1);
1002 if (!trans) {
1003 ret = -ENOMEM;
1004 goto done;
1005 }
1006
1007 path->reada = 2;
1008
1009 device->total_bytes = new_size;
1010 ret = btrfs_update_device(trans, device);
1011 if (ret) {
1012 btrfs_end_transaction(trans, root);
1013 goto done;
1014 }
1015 WARN_ON(diff > old_total);
1016 btrfs_set_super_total_bytes(super_copy, old_total - diff);
1017 btrfs_end_transaction(trans, root);
1018
1019 key.objectid = device->devid;
1020 key.offset = (u64)-1;
1021 key.type = BTRFS_DEV_EXTENT_KEY;
1022
1023 while (1) {
1024 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1025 if (ret < 0)
1026 goto done;
1027
1028 ret = btrfs_previous_item(root, path, 0, key.type);
1029 if (ret < 0)
1030 goto done;
1031 if (ret) {
1032 ret = 0;
1033 goto done;
1034 }
1035
1036 l = path->nodes[0];
1037 slot = path->slots[0];
1038 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1039
1040 if (key.objectid != device->devid)
1041 goto done;
1042
1043 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1044 length = btrfs_dev_extent_length(l, dev_extent);
1045
1046 if (key.offset + length <= new_size)
1047 goto done;
1048
1049 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1050 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1051 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1052 btrfs_release_path(root, path);
1053
1054 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
1055 chunk_offset);
1056 if (ret)
1057 goto done;
1058 }
1059
1060 done:
1061 btrfs_free_path(path);
1062 return ret;
1063 }
1064
1065 int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1066 struct btrfs_root *root,
1067 struct btrfs_key *key,
1068 struct btrfs_chunk *chunk, int item_size)
1069 {
1070 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1071 struct btrfs_disk_key disk_key;
1072 u32 array_size;
1073 u8 *ptr;
1074
1075 array_size = btrfs_super_sys_array_size(super_copy);
1076 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1077 return -EFBIG;
1078
1079 ptr = super_copy->sys_chunk_array + array_size;
1080 btrfs_cpu_key_to_disk(&disk_key, key);
1081 memcpy(ptr, &disk_key, sizeof(disk_key));
1082 ptr += sizeof(disk_key);
1083 memcpy(ptr, chunk, item_size);
1084 item_size += sizeof(disk_key);
1085 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
1086 return 0;
1087 }
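/*
 * The sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
 * struct btrfs_chunk + stripes) pairs, so appending a two stripe chunk
 * grows array_size by sizeof(struct btrfs_disk_key) +
 * btrfs_chunk_item_size(2) bytes.
 */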
1088
1089 static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
1090 int sub_stripes)
1091 {
1092 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1093 return calc_size;
1094 else if (type & BTRFS_BLOCK_GROUP_RAID10)
1095 return calc_size * (num_stripes / sub_stripes);
1096 else
1097 return calc_size * num_stripes;
1098 }
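/*
 * For example, with a calc_size of 1GB per stripe and four stripes, a
 * RAID0 chunk maps 4GB of logical space, a RAID10 chunk (sub_stripes of
 * 2) maps 2GB, and a RAID1 or DUP chunk maps only 1GB because the extra
 * stripes hold copies rather than new space.
 */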
1099
1100
1101 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1102 struct btrfs_root *extent_root, u64 *start,
1103 u64 *num_bytes, u64 type)
1104 {
1105 u64 dev_offset;
1106 struct btrfs_fs_info *info = extent_root->fs_info;
1107 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
1108 struct btrfs_path *path;
1109 struct btrfs_stripe *stripes;
1110 struct btrfs_device *device = NULL;
1111 struct btrfs_chunk *chunk;
1112 struct list_head private_devs;
1113 struct list_head *dev_list;
1114 struct list_head *cur;
1115 struct extent_map_tree *em_tree;
1116 struct map_lookup *map;
1117 struct extent_map *em;
1118 int min_stripe_size = 1 * 1024 * 1024;
1119 u64 physical;
1120 u64 calc_size = 1024 * 1024 * 1024;
1121 u64 max_chunk_size = calc_size;
1122 u64 min_free;
1123 u64 avail;
1124 u64 max_avail = 0;
1125 u64 percent_max;
1126 int num_stripes = 1;
1127 int min_stripes = 1;
1128 int sub_stripes = 0;
1129 int looped = 0;
1130 int ret;
1131 int index;
1132 int stripe_len = 64 * 1024;
1133 struct btrfs_key key;
1134
1135 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1136 (type & BTRFS_BLOCK_GROUP_DUP)) {
1137 WARN_ON(1);
1138 type &= ~BTRFS_BLOCK_GROUP_DUP;
1139 }
1140 dev_list = &extent_root->fs_info->fs_devices->alloc_list;
1141 if (list_empty(dev_list))
1142 return -ENOSPC;
1143
1144 if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1145 num_stripes = btrfs_super_num_devices(&info->super_copy);
1146 min_stripes = 2;
1147 }
1148 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1149 num_stripes = 2;
1150 min_stripes = 2;
1151 }
1152 if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1153 num_stripes = min_t(u64, 2,
1154 btrfs_super_num_devices(&info->super_copy));
1155 if (num_stripes < 2)
1156 return -ENOSPC;
1157 min_stripes = 2;
1158 }
1159 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1160 num_stripes = btrfs_super_num_devices(&info->super_copy);
1161 if (num_stripes < 4)
1162 return -ENOSPC;
1163 num_stripes &= ~(u32)1;
1164 sub_stripes = 2;
1165 min_stripes = 4;
1166 }
1167
1168 if (type & BTRFS_BLOCK_GROUP_DATA) {
1169 max_chunk_size = 10 * calc_size;
1170 min_stripe_size = 64 * 1024 * 1024;
1171 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1172 max_chunk_size = 4 * calc_size;
1173 min_stripe_size = 32 * 1024 * 1024;
1174 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1175 calc_size = 8 * 1024 * 1024;
1176 max_chunk_size = calc_size * 2;
1177 min_stripe_size = 1 * 1024 * 1024;
1178 }
1179
1180 path = btrfs_alloc_path();
1181 if (!path)
1182 return -ENOMEM;
1183
1184 /* we don't want a chunk larger than 10% of the FS */
1185 percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
1186 max_chunk_size = min(percent_max, max_chunk_size);
1187
1188 again:
1189 if (calc_size * num_stripes > max_chunk_size) {
1190 calc_size = max_chunk_size;
1191 do_div(calc_size, num_stripes);
1192 do_div(calc_size, stripe_len);
1193 calc_size *= stripe_len;
1194 }
1195 /* we don't want tiny stripes */
1196 calc_size = max_t(u64, min_stripe_size, calc_size);
1197
1198 do_div(calc_size, stripe_len);
1199 calc_size *= stripe_len;
1200
1201 INIT_LIST_HEAD(&private_devs);
1202 cur = dev_list->next;
1203 index = 0;
1204
1205 if (type & BTRFS_BLOCK_GROUP_DUP)
1206 min_free = calc_size * 2;
1207 else
1208 min_free = calc_size;
1209
1210 /* we add 1MB because we never use the first 1MB of the device */
1211 min_free += 1024 * 1024;
1212
1213 /* build a private list of devices we will allocate from */
1214 while(index < num_stripes) {
1215 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1216
1217 avail = device->total_bytes - device->bytes_used;
1218 cur = cur->next;
1219
1220 if (avail >= min_free) {
1221 u64 ignored_start = 0;
1222 ret = find_free_dev_extent(trans, device, path,
1223 min_free,
1224 &ignored_start);
1225 if (ret == 0) {
1226 list_move_tail(&device->dev_alloc_list,
1227 &private_devs);
1228 index++;
1229 if (type & BTRFS_BLOCK_GROUP_DUP)
1230 index++;
1231 }
1232 } else if (avail > max_avail)
1233 max_avail = avail;
1234 if (cur == dev_list)
1235 break;
1236 }
1237 if (index < num_stripes) {
1238 list_splice(&private_devs, dev_list);
1239 if (index >= min_stripes) {
1240 num_stripes = index;
1241 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1242 num_stripes /= sub_stripes;
1243 num_stripes *= sub_stripes;
1244 }
1245 looped = 1;
1246 goto again;
1247 }
1248 if (!looped && max_avail > 0) {
1249 looped = 1;
1250 calc_size = max_avail;
1251 goto again;
1252 }
1253 btrfs_free_path(path);
1254 return -ENOSPC;
1255 }
1256 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1257 key.type = BTRFS_CHUNK_ITEM_KEY;
1258 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1259 &key.offset);
1260 if (ret) {
1261 btrfs_free_path(path);
1262 return ret;
1263 }
1264
1265 chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
1266 if (!chunk) {
1267 btrfs_free_path(path);
1268 return -ENOMEM;
1269 }
1270
1271 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
1272 if (!map) {
1273 kfree(chunk);
1274 btrfs_free_path(path);
1275 return -ENOMEM;
1276 }
1277 btrfs_free_path(path);
1278 path = NULL;
1279
1280 stripes = &chunk->stripe;
1281 *num_bytes = chunk_bytes_by_type(type, calc_size,
1282 num_stripes, sub_stripes);
1283
1284 index = 0;
1285 while(index < num_stripes) {
1286 struct btrfs_stripe *stripe;
1287 BUG_ON(list_empty(&private_devs));
1288 cur = private_devs.next;
1289 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1290
1291 /* loop over this device again if we're doing a dup group */
1292 if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
1293 (index == num_stripes - 1))
1294 list_move_tail(&device->dev_alloc_list, dev_list);
1295
1296 ret = btrfs_alloc_dev_extent(trans, device,
1297 info->chunk_root->root_key.objectid,
1298 BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
1299 calc_size, &dev_offset);
1300 BUG_ON(ret);
1301 device->bytes_used += calc_size;
1302 ret = btrfs_update_device(trans, device);
1303 BUG_ON(ret);
1304
1305 map->stripes[index].dev = device;
1306 map->stripes[index].physical = dev_offset;
1307 stripe = stripes + index;
1308 btrfs_set_stack_stripe_devid(stripe, device->devid);
1309 btrfs_set_stack_stripe_offset(stripe, dev_offset);
1310 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
1311 physical = dev_offset;
1312 index++;
1313 }
1314 BUG_ON(!list_empty(&private_devs));
1315
1316 /* key was set above */
1317 btrfs_set_stack_chunk_length(chunk, *num_bytes);
1318 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
1319 btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
1320 btrfs_set_stack_chunk_type(chunk, type);
1321 btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
1322 btrfs_set_stack_chunk_io_align(chunk, stripe_len);
1323 btrfs_set_stack_chunk_io_width(chunk, stripe_len);
1324 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
1325 btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
1326 map->sector_size = extent_root->sectorsize;
1327 map->stripe_len = stripe_len;
1328 map->io_align = stripe_len;
1329 map->io_width = stripe_len;
1330 map->type = type;
1331 map->num_stripes = num_stripes;
1332 map->sub_stripes = sub_stripes;
1333
1334 ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
1335 btrfs_chunk_item_size(num_stripes));
1336 BUG_ON(ret);
1337 	*start = key.offset;
1338
1339 em = alloc_extent_map(GFP_NOFS);
1340 if (!em)
1341 return -ENOMEM;
1342 em->bdev = (struct block_device *)map;
1343 em->start = key.offset;
1344 em->len = *num_bytes;
1345 em->block_start = 0;
1346
1347 if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1348 ret = btrfs_add_system_chunk(trans, chunk_root, &key,
1349 chunk, btrfs_chunk_item_size(num_stripes));
1350 BUG_ON(ret);
1351 }
1352 kfree(chunk);
1353
1354 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
1355 spin_lock(&em_tree->lock);
1356 ret = add_extent_mapping(em_tree, em);
1357 spin_unlock(&em_tree->lock);
1358 BUG_ON(ret);
1359 free_extent_map(em);
1360 return ret;
1361 }
1362
1363 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
1364 {
1365 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
1366 }
1367
1368 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
1369 {
1370 struct extent_map *em;
1371
1372 while(1) {
1373 spin_lock(&tree->map_tree.lock);
1374 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
1375 if (em)
1376 remove_extent_mapping(&tree->map_tree, em);
1377 spin_unlock(&tree->map_tree.lock);
1378 if (!em)
1379 break;
1380 kfree(em->bdev);
1381 /* once for us */
1382 free_extent_map(em);
1383 /* once for the tree */
1384 free_extent_map(em);
1385 }
1386 }
1387
1388 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
1389 {
1390 struct extent_map *em;
1391 struct map_lookup *map;
1392 struct extent_map_tree *em_tree = &map_tree->map_tree;
1393 int ret;
1394
1395 spin_lock(&em_tree->lock);
1396 em = lookup_extent_mapping(em_tree, logical, len);
1397 spin_unlock(&em_tree->lock);
1398 BUG_ON(!em);
1399
1400 BUG_ON(em->start > logical || em->start + em->len < logical);
1401 map = (struct map_lookup *)em->bdev;
1402 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
1403 ret = map->num_stripes;
1404 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1405 ret = map->sub_stripes;
1406 else
1407 ret = 1;
1408 free_extent_map(em);
1409 return ret;
1410 }
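/*
 * So a two device RAID1 chunk or a DUP chunk reports 2 copies, RAID10
 * reports sub_stripes (2) and single or RAID0 chunks report 1; callers
 * can use this to decide how many mirrors are worth trying.
 */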
1411
1412 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1413 u64 logical, u64 *length,
1414 struct btrfs_multi_bio **multi_ret,
1415 int mirror_num, struct page *unplug_page)
1416 {
1417 struct extent_map *em;
1418 struct map_lookup *map;
1419 struct extent_map_tree *em_tree = &map_tree->map_tree;
1420 u64 offset;
1421 u64 stripe_offset;
1422 u64 stripe_nr;
1423 int stripes_allocated = 8;
1424 int stripes_required = 1;
1425 int stripe_index;
1426 int i;
1427 int num_stripes;
1428 int max_errors = 0;
1429 struct btrfs_multi_bio *multi = NULL;
1430
1431 if (multi_ret && !(rw & (1 << BIO_RW))) {
1432 stripes_allocated = 1;
1433 }
1434 again:
1435 if (multi_ret) {
1436 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1437 GFP_NOFS);
1438 if (!multi)
1439 return -ENOMEM;
1440
1441 atomic_set(&multi->error, 0);
1442 }
1443
1444 spin_lock(&em_tree->lock);
1445 em = lookup_extent_mapping(em_tree, logical, *length);
1446 spin_unlock(&em_tree->lock);
1447
1448 if (!em && unplug_page)
1449 return 0;
1450
1451 if (!em) {
1452 printk("unable to find logical %Lu\n", logical);
1453 BUG();
1454 }
1455
1456 BUG_ON(em->start > logical || em->start + em->len < logical);
1457 map = (struct map_lookup *)em->bdev;
1458 offset = logical - em->start;
1459
1460 if (mirror_num > map->num_stripes)
1461 mirror_num = 0;
1462
1463 /* if our multi bio struct is too small, back off and try again */
1464 if (rw & (1 << BIO_RW)) {
1465 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1466 BTRFS_BLOCK_GROUP_DUP)) {
1467 stripes_required = map->num_stripes;
1468 max_errors = 1;
1469 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1470 stripes_required = map->sub_stripes;
1471 max_errors = 1;
1472 }
1473 }
1474 if (multi_ret && rw == WRITE &&
1475 stripes_allocated < stripes_required) {
1476 stripes_allocated = map->num_stripes;
1477 free_extent_map(em);
1478 kfree(multi);
1479 goto again;
1480 }
1481 stripe_nr = offset;
1482 /*
1483 * stripe_nr counts the total number of stripes we have to stride
1484 * to get to this block
1485 */
1486 do_div(stripe_nr, map->stripe_len);
1487
1488 stripe_offset = stripe_nr * map->stripe_len;
1489 BUG_ON(offset < stripe_offset);
1490
1491 	/* stripe_offset is the offset of this block in its stripe */
1492 stripe_offset = offset - stripe_offset;
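	/*
	 * for example, with a 64KB stripe_len and an offset of 200KB into
	 * the chunk, stripe_nr ends up as 3 and stripe_offset as 8KB
	 * (200KB - 192KB)
	 */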
1493
1494 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
1495 BTRFS_BLOCK_GROUP_RAID10 |
1496 BTRFS_BLOCK_GROUP_DUP)) {
1497 /* we limit the length of each bio to what fits in a stripe */
1498 *length = min_t(u64, em->len - offset,
1499 map->stripe_len - stripe_offset);
1500 } else {
1501 *length = em->len - offset;
1502 }
1503
1504 if (!multi_ret && !unplug_page)
1505 goto out;
1506
1507 num_stripes = 1;
1508 stripe_index = 0;
1509 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1510 if (unplug_page || (rw & (1 << BIO_RW)))
1511 num_stripes = map->num_stripes;
1512 else if (mirror_num) {
1513 stripe_index = mirror_num - 1;
1514 } else {
1515 u64 orig_stripe_nr = stripe_nr;
1516 stripe_index = do_div(orig_stripe_nr, num_stripes);
1517 }
1518 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1519 if (rw & (1 << BIO_RW))
1520 num_stripes = map->num_stripes;
1521 else if (mirror_num)
1522 stripe_index = mirror_num - 1;
1523 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1524 int factor = map->num_stripes / map->sub_stripes;
1525
1526 stripe_index = do_div(stripe_nr, factor);
1527 stripe_index *= map->sub_stripes;
1528
1529 if (unplug_page || (rw & (1 << BIO_RW)))
1530 num_stripes = map->sub_stripes;
1531 else if (mirror_num)
1532 stripe_index += mirror_num - 1;
1533 else {
1534 u64 orig_stripe_nr = stripe_nr;
1535 stripe_index += do_div(orig_stripe_nr,
1536 map->sub_stripes);
1537 }
1538 } else {
1539 /*
1540 * after this do_div call, stripe_nr is the number of stripes
1541 * on this device we have to walk to find the data, and
1542 * stripe_index is the number of our device in the stripe array
1543 */
1544 stripe_index = do_div(stripe_nr, map->num_stripes);
1545 }
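	/*
	 * RAID10 example: with num_stripes 4 and sub_stripes 2 the factor
	 * is 2, so a block whose original stripe_nr was 5 lands in mirror
	 * group 1, giving stripe_index (5 % 2) * 2 == 2; a write then fans
	 * out to stripes 2 and 3 and stripe_nr becomes 2 for the
	 * per-device offset.
	 */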
1546 BUG_ON(stripe_index >= map->num_stripes);
1547
1548 for (i = 0; i < num_stripes; i++) {
1549 if (unplug_page) {
1550 struct btrfs_device *device;
1551 struct backing_dev_info *bdi;
1552
1553 device = map->stripes[stripe_index].dev;
1554 bdi = blk_get_backing_dev_info(device->bdev);
1555 if (bdi->unplug_io_fn) {
1556 bdi->unplug_io_fn(bdi, unplug_page);
1557 }
1558 } else {
1559 multi->stripes[i].physical =
1560 map->stripes[stripe_index].physical +
1561 stripe_offset + stripe_nr * map->stripe_len;
1562 multi->stripes[i].dev = map->stripes[stripe_index].dev;
1563 }
1564 stripe_index++;
1565 }
1566 if (multi_ret) {
1567 *multi_ret = multi;
1568 multi->num_stripes = num_stripes;
1569 multi->max_errors = max_errors;
1570 }
1571 out:
1572 free_extent_map(em);
1573 return 0;
1574 }
1575
1576 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1577 u64 logical, u64 *length,
1578 struct btrfs_multi_bio **multi_ret, int mirror_num)
1579 {
1580 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
1581 mirror_num, NULL);
1582 }
1583
1584 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
1585 u64 logical, struct page *page)
1586 {
1587 u64 length = PAGE_CACHE_SIZE;
1588 return __btrfs_map_block(map_tree, READ, logical, &length,
1589 NULL, 0, page);
1590 }
1591
1592
1593 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1594 static void end_bio_multi_stripe(struct bio *bio, int err)
1595 #else
1596 static int end_bio_multi_stripe(struct bio *bio,
1597 unsigned int bytes_done, int err)
1598 #endif
1599 {
1600 struct btrfs_multi_bio *multi = bio->bi_private;
1601
1602 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1603 if (bio->bi_size)
1604 return 1;
1605 #endif
1606 if (err)
1607 atomic_inc(&multi->error);
1608
1609 if (atomic_dec_and_test(&multi->stripes_pending)) {
1610 bio->bi_private = multi->private;
1611 bio->bi_end_io = multi->end_io;
1612
1613 /* only send an error to the higher layers if it is
1614 * beyond the tolerance of the multi-bio
1615 */
1616 if (atomic_read(&multi->error) > multi->max_errors)
1617 err = -EIO;
1618 else
1619 err = 0;
1620 kfree(multi);
1621
1622 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1623 bio_endio(bio, bio->bi_size, err);
1624 #else
1625 bio_endio(bio, err);
1626 #endif
1627 } else {
1628 bio_put(bio);
1629 }
1630 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1631 return 0;
1632 #endif
1633 }
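/*
 * For mirrored writes (RAID1, DUP and RAID10) __btrfs_map_block sets
 * max_errors to 1, so when one of the two copies fails multi->error is
 * only 1, that is within the tolerance and the original bio still
 * completes without error; only if every copy fails does -EIO propagate
 * to the higher layers.
 */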
1634
1635 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
1636 int mirror_num)
1637 {
1638 struct btrfs_mapping_tree *map_tree;
1639 struct btrfs_device *dev;
1640 struct bio *first_bio = bio;
1641 u64 logical = bio->bi_sector << 9;
1642 u64 length = 0;
1643 u64 map_length;
1644 struct btrfs_multi_bio *multi = NULL;
1645 int ret;
1646 int dev_nr = 0;
1647 int total_devs = 1;
1648
1649 length = bio->bi_size;
1650 map_tree = &root->fs_info->mapping_tree;
1651 map_length = length;
1652
1653 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
1654 mirror_num);
1655 BUG_ON(ret);
1656
1657 total_devs = multi->num_stripes;
1658 if (map_length < length) {
1659 printk("mapping failed logical %Lu bio len %Lu "
1660 "len %Lu\n", logical, length, map_length);
1661 BUG();
1662 }
1663 multi->end_io = first_bio->bi_end_io;
1664 multi->private = first_bio->bi_private;
1665 atomic_set(&multi->stripes_pending, multi->num_stripes);
1666
1667 while(dev_nr < total_devs) {
1668 if (total_devs > 1) {
1669 if (dev_nr < total_devs - 1) {
1670 bio = bio_clone(first_bio, GFP_NOFS);
1671 BUG_ON(!bio);
1672 } else {
1673 bio = first_bio;
1674 }
1675 bio->bi_private = multi;
1676 bio->bi_end_io = end_bio_multi_stripe;
1677 }
1678 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
1679 dev = multi->stripes[dev_nr].dev;
1680
1681 bio->bi_bdev = dev->bdev;
1682 spin_lock(&dev->io_lock);
1683 dev->total_ios++;
1684 spin_unlock(&dev->io_lock);
1685 submit_bio(rw, bio);
1686 dev_nr++;
1687 }
1688 if (total_devs == 1)
1689 kfree(multi);
1690 return 0;
1691 }
1692
1693 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
1694 u8 *uuid)
1695 {
1696 struct list_head *head = &root->fs_info->fs_devices->devices;
1697
1698 return __find_device(head, devid, uuid);
1699 }
1700
1701 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
1702 struct extent_buffer *leaf,
1703 struct btrfs_chunk *chunk)
1704 {
1705 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1706 struct map_lookup *map;
1707 struct extent_map *em;
1708 u64 logical;
1709 u64 length;
1710 u64 devid;
1711 u8 uuid[BTRFS_UUID_SIZE];
1712 int num_stripes;
1713 int ret;
1714 int i;
1715
1716 logical = key->offset;
1717 length = btrfs_chunk_length(leaf, chunk);
1718 spin_lock(&map_tree->map_tree.lock);
1719 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
1720 spin_unlock(&map_tree->map_tree.lock);
1721
1722 /* already mapped? */
1723 if (em && em->start <= logical && em->start + em->len > logical) {
1724 free_extent_map(em);
1725 return 0;
1726 } else if (em) {
1727 free_extent_map(em);
1728 }
1729
1734 em = alloc_extent_map(GFP_NOFS);
1735 if (!em)
1736 return -ENOMEM;
1737 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
1738 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
1739 if (!map) {
1740 free_extent_map(em);
1741 return -ENOMEM;
1742 }
1743
1744 em->bdev = (struct block_device *)map;
1745 em->start = logical;
1746 em->len = length;
1747 em->block_start = 0;
1748
1749 map->num_stripes = num_stripes;
1750 map->io_width = btrfs_chunk_io_width(leaf, chunk);
1751 map->io_align = btrfs_chunk_io_align(leaf, chunk);
1752 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
1753 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
1754 map->type = btrfs_chunk_type(leaf, chunk);
1755 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
1756 for (i = 0; i < num_stripes; i++) {
1757 map->stripes[i].physical =
1758 btrfs_stripe_offset_nr(leaf, chunk, i);
1759 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
1760 read_extent_buffer(leaf, uuid, (unsigned long)
1761 btrfs_stripe_dev_uuid_nr(chunk, i),
1762 BTRFS_UUID_SIZE);
1763 map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
1764 if (!map->stripes[i].dev) {
1765 kfree(map);
1766 free_extent_map(em);
1767 return -EIO;
1768 }
1769 }
1770
1771 spin_lock(&map_tree->map_tree.lock);
1772 ret = add_extent_mapping(&map_tree->map_tree, em);
1773 spin_unlock(&map_tree->map_tree.lock);
1774 BUG_ON(ret);
1775 free_extent_map(em);
1776
1777 return 0;
1778 }
1779
1780 static int fill_device_from_item(struct extent_buffer *leaf,
1781 struct btrfs_dev_item *dev_item,
1782 struct btrfs_device *device)
1783 {
1784 unsigned long ptr;
1785
1786 device->devid = btrfs_device_id(leaf, dev_item);
1787 device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
1788 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
1789 device->type = btrfs_device_type(leaf, dev_item);
1790 device->io_align = btrfs_device_io_align(leaf, dev_item);
1791 device->io_width = btrfs_device_io_width(leaf, dev_item);
1792 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
1793
1794 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1795 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1796
1797 return 0;
1798 }
1799
1800 static int read_one_dev(struct btrfs_root *root,
1801 struct extent_buffer *leaf,
1802 struct btrfs_dev_item *dev_item)
1803 {
1804 struct btrfs_device *device;
1805 u64 devid;
1806 int ret;
1807 u8 dev_uuid[BTRFS_UUID_SIZE];
1808
1809 devid = btrfs_device_id(leaf, dev_item);
1810 read_extent_buffer(leaf, dev_uuid,
1811 (unsigned long)btrfs_device_uuid(dev_item),
1812 BTRFS_UUID_SIZE);
1813 device = btrfs_find_device(root, devid, dev_uuid);
1814 if (!device) {
1815 printk("warning devid %Lu not found already\n", devid);
1816 device = kzalloc(sizeof(*device), GFP_NOFS);
1817 if (!device)
1818 return -ENOMEM;
1819 list_add(&device->dev_list,
1820 &root->fs_info->fs_devices->devices);
1821 list_add(&device->dev_alloc_list,
1822 &root->fs_info->fs_devices->alloc_list);
1823 device->barriers = 1;
1824 spin_lock_init(&device->io_lock);
1825 }
1826
1827 fill_device_from_item(leaf, dev_item, device);
1828 device->dev_root = root->fs_info->dev_root;
1829 ret = 0;
1830 #if 0
1831 ret = btrfs_open_device(device);
1832 if (ret) {
1833 kfree(device);
1834 }
1835 #endif
1836 return ret;
1837 }
1838
1839 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
1840 {
1841 struct btrfs_dev_item *dev_item;
1842
1843 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
1844 dev_item);
1845 return read_one_dev(root, buf, dev_item);
1846 }
1847
1848 int btrfs_read_sys_array(struct btrfs_root *root)
1849 {
1850 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1851 struct extent_buffer *sb = root->fs_info->sb_buffer;
1852 struct btrfs_disk_key *disk_key;
1853 struct btrfs_chunk *chunk;
1854 u8 *ptr;
1855 unsigned long sb_ptr;
1856 int ret = 0;
1857 u32 num_stripes;
1858 u32 array_size;
1859 u32 len = 0;
1860 u32 cur;
1861 struct btrfs_key key;
1862
1863 array_size = btrfs_super_sys_array_size(super_copy);
1864
1865 ptr = super_copy->sys_chunk_array;
1866 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
1867 cur = 0;
1868
1869 while (cur < array_size) {
1870 disk_key = (struct btrfs_disk_key *)ptr;
1871 btrfs_disk_key_to_cpu(&key, disk_key);
1872
1873 len = sizeof(*disk_key);
1874 ptr += len;
1875 sb_ptr += len;
1876 cur += len;
1877
1878 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1879 chunk = (struct btrfs_chunk *)sb_ptr;
1880 ret = read_one_chunk(root, &key, sb, chunk);
1881 if (ret)
1882 break;
1883 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
1884 len = btrfs_chunk_item_size(num_stripes);
1885 } else {
1886 ret = -EIO;
1887 break;
1888 }
1889 ptr += len;
1890 sb_ptr += len;
1891 cur += len;
1892 }
1893 return ret;
1894 }
1895
1896 int btrfs_read_chunk_tree(struct btrfs_root *root)
1897 {
1898 struct btrfs_path *path;
1899 struct extent_buffer *leaf;
1900 struct btrfs_key key;
1901 struct btrfs_key found_key;
1902 int ret;
1903 int slot;
1904
1905 root = root->fs_info->chunk_root;
1906
1907 path = btrfs_alloc_path();
1908 if (!path)
1909 return -ENOMEM;
1910
1911 /* first we search for all of the device items, and then we
1912 * read in all of the chunk items. This way we can create chunk
1913 	 * mappings that reference all of the devices that are found
1914 */
1915 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1916 key.offset = 0;
1917 key.type = 0;
1918 again:
1919 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1920 while(1) {
1921 leaf = path->nodes[0];
1922 slot = path->slots[0];
1923 if (slot >= btrfs_header_nritems(leaf)) {
1924 ret = btrfs_next_leaf(root, path);
1925 if (ret == 0)
1926 continue;
1927 if (ret < 0)
1928 goto error;
1929 break;
1930 }
1931 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1932 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
1933 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
1934 break;
1935 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
1936 struct btrfs_dev_item *dev_item;
1937 dev_item = btrfs_item_ptr(leaf, slot,
1938 struct btrfs_dev_item);
1939 ret = read_one_dev(root, leaf, dev_item);
1940 BUG_ON(ret);
1941 }
1942 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
1943 struct btrfs_chunk *chunk;
1944 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
1945 ret = read_one_chunk(root, &found_key, leaf, chunk);
1946 }
1947 path->slots[0]++;
1948 }
1949 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
1950 key.objectid = 0;
1951 btrfs_release_path(root, path);
1952 goto again;
1953 }
1954
1955 btrfs_free_path(path);
1956 ret = 0;
1957 error:
1958 return ret;
1959 }
1960