/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#endif

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
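/*
 * With 4 KiB pages, PAGE_SECTORS_SHIFT is 3 and PAGE_SECTORS is 8:
 * each backing page holds eight 512-byte sectors.
 */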

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int			brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
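	/*
	 * radix_tree_insert() fails (-EEXIST) if another thread inserted a
	 * page for this index while we were allocating ours; in that case
	 * drop our page and use the one already in the tree.
	 */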
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * this will need to change too.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
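	/* The write may straddle a page boundary; pre-allocate the second page. */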
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (is_write) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
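	/*
	 * For reads, flush the caller's page after filling it so user
	 * mappings see the new data; for writes, flush first so we copy
	 * the data userspace actually wrote.
	 */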
	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
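		/* Only page-aligned, whole-page discard ranges are handled. */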
		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
		    bio->bi_iter.bi_size & ~PAGE_MASK)
			goto io_error;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, err);
	return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, pfn_t *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn_t(page);

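	/* Only one page is mapped per call, so at most PAGE_SIZE is available. */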
	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics: we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.ioctl =		brd_ioctl,
	.direct_access =	brd_direct_access,
};

/*
 * And now the module's code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
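
/*
 * Example usage (a sketch; rd_size is in KiB, and the /dev/ram* names
 * assume the standard ramdisk device nodes):
 *
 *	modprobe brd rd_nr=4 rd_size=1048576	# four 1 GiB ram disks
 *	mkfs.ext4 /dev/ram0
 */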

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
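	/* Tree nodes may be allocated while holding brd_lock, hence GFP_ATOMIC. */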
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/*
	 * This is so fdisk will align partitions on 4k, because the
	 * direct_access API needs 4k alignment, returning a PFN.
	 * (This is only a problem on very small devices <= 4M;
	 * otherwise fdisk will align on 1M. Regardless, this call
	 * is harmless.)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
#ifdef CONFIG_BLK_DEV_RAM_DAX
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
#endif
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
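	/* rd_size is in KiB; capacity is counted in 512-byte sectors, hence * 2. */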
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
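	/* Each device spans max_part minors; map the minor back to its index. */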
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module can instantiate underlying device structures
	 * on-demand, provided that the device node is accessed.
	 *
	 * (1) If rd_nr is specified, create that many devices upfront;
	 *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Users can further extend brd devices by creating device nodes
	 *     themselves and having the kernel automatically instantiate the
	 *     actual device on-demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created, it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);