/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

static sector_t max_block(struct block_device *bdev)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = i_size_read(bdev->bd_inode);

	if (sz) {
		unsigned int size = block_size(bdev);
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/* Kill _all_ buffers, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	invalidate_bdev(bdev, 1);
	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_hardsect_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_hardsect_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

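/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * fill_super typically starts from the smallest block size the device
 * supports and later switches to the size recorded in its on-disk
 * superblock.  The names example_fill_super and EXAMPLE_BLOCK_SIZE below
 * are hypothetical.
 *
 *	static int example_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		int blocksize = sb_min_blocksize(sb, 1024);
 *
 *		if (!blocksize)
 *			return -EINVAL;		(hardsect size too large for us)
 *		...
 *		(once the real block size is known, e.g. from the superblock)
 *		sb_set_blocksize(sb, EXAMPLE_BLOCK_SIZE);
 *		...
 *	}
 */
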
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	if (iblock >= max_block(I_BDEV(inode))) {
		if (create)
			return -EIO;

		/*
		 * for reads, we're just trying to fill a partial page.
		 * return a hole, they will have to call get_block again
		 * before they can fill it, and they will get -EIO at that
		 * time
		 */
		return 0;
	}
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	sector_t end_block = max_block(I_BDEV(inode));
	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;

	if ((iblock + max_blocks) > end_block) {
		max_blocks = end_block - iblock;
		if ((long)max_blocks <= 0) {
			if (create)
				return -EIO;	/* write fully beyond EOF */
			/*
			 * It is a read which is fully beyond EOF.  We return
			 * a !buffer_mapped buffer
			 */
			max_blocks = 0;
		}
	}

	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	bh->b_size = max_blocks << inode->i_blkbits;
	if (max_blocks)
		set_buffer_mapped(bh);
	return 0;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
				iov, offset, nr_segs, blkdev_get_blocks, NULL);
}

#if 0
static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
{
	struct kiocb *iocb = bio->bi_private;
	atomic_t *bio_count = &iocb->ki_bio_count;

	if (bio_data_dir(bio) == READ)
		bio_check_pages_dirty(bio);
	else {
		bio_release_pages(bio);
		bio_put(bio);
	}

	/* iocb->ki_nbytes stores error code from LLDD */
	if (error)
		iocb->ki_nbytes = -EIO;

	if (atomic_dec_and_test(bio_count)) {
		if ((long)iocb->ki_nbytes < 0)
			aio_complete(iocb, iocb->ki_nbytes, 0);
		else
			aio_complete(iocb, iocb->ki_left, 0);
	}

	return 0;
}

#define VEC_SIZE	16
struct pvec {
	unsigned short nr;
	unsigned short idx;
	struct page *page[VEC_SIZE];
};

#define PAGES_SPANNED(addr, len)	\
	(DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);

/*
 * get page pointer for user addr, we internally cache struct page array for
 * (addr, count) range in pvec to avoid frequent call to get_user_pages.  If
 * internal page list is exhausted, a batch count of up to VEC_SIZE is used
 * to get next set of page struct.
 */
static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
				 struct pvec *pvec)
{
	int ret, nr_pages;
	if (pvec->idx == pvec->nr) {
		nr_pages = PAGES_SPANNED(addr, count);
		nr_pages = min(nr_pages, VEC_SIZE);
		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(current, current->mm, addr, nr_pages,
				     rw == READ, 0, pvec->page, NULL);
		up_read(&current->mm->mmap_sem);
		if (ret < 0)
			return ERR_PTR(ret);
		pvec->nr = ret;
		pvec->idx = 0;
	}
	return pvec->page[pvec->idx++];
}

/* return a page back to pvec array */
static void blk_unget_page(struct page *page, struct pvec *pvec)
{
	pvec->page[--pvec->idx] = page;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		 loff_t pos, unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long seg = 0;	/* iov segment iterator */
	unsigned long nvec;	/* number of bio vec needed */
	unsigned long cur_off;	/* offset into current page */
	unsigned long cur_len;	/* I/O len of current page, up to PAGE_SIZE */

	unsigned long addr;	/* user iovec address */
	size_t count;		/* user iovec len */
	size_t nbytes = iocb->ki_nbytes = iocb->ki_left; /* total xfer size */
	loff_t size;		/* size of block device */
	struct bio *bio;
	atomic_t *bio_count = &iocb->ki_bio_count;
	struct page *page;
	struct pvec pvec;

	pvec.nr = 0;
	pvec.idx = 0;

	if (pos & blocksize_mask)
		return -EINVAL;

	size = i_size_read(inode);
	if (pos + nbytes > size) {
		nbytes = size - pos;
		iocb->ki_left = nbytes;
	}

	/*
	 * check first non-zero iov alignment, the remaining
	 * iov alignment is checked inside bio loop below.
	 */
	do {
		addr = (unsigned long) iov[seg].iov_base;
		count = min(iov[seg].iov_len, nbytes);
		if (addr & blocksize_mask || count & blocksize_mask)
			return -EINVAL;
	} while (!count && ++seg < nr_segs);
	atomic_set(bio_count, 1);

	while (nbytes) {
		/* roughly estimate number of bio vec needed */
		nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
		nvec = max(nvec, nr_segs - seg);
		nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);

		/* bio_alloc should not fail with GFP_KERNEL flag */
		bio = bio_alloc(GFP_KERNEL, nvec);
		bio->bi_bdev = I_BDEV(inode);
		bio->bi_end_io = blk_end_aio;
		bio->bi_private = iocb;
		bio->bi_sector = pos >> blkbits;
same_bio:
		cur_off = addr & ~PAGE_MASK;
		cur_len = PAGE_SIZE - cur_off;
		if (count < cur_len)
			cur_len = count;

		page = blk_get_page(addr, count, rw, &pvec);
		if (unlikely(IS_ERR(page)))
			goto backout;

		if (bio_add_page(bio, page, cur_len, cur_off)) {
			pos += cur_len;
			addr += cur_len;
			count -= cur_len;
			nbytes -= cur_len;

			if (count)
				goto same_bio;
			while (++seg < nr_segs) {
				addr = (unsigned long) iov[seg].iov_base;
				count = iov[seg].iov_len;
				if (!count)
					continue;
				if (unlikely(addr & blocksize_mask ||
					     count & blocksize_mask)) {
					page = ERR_PTR(-EINVAL);
					goto backout;
				}
				count = min(count, nbytes);
				goto same_bio;
			}
		} else {
			blk_unget_page(page, &pvec);
		}

		/* bio is ready, submit it */
		if (rw == READ)
			bio_set_pages_dirty(bio);
		atomic_inc(bio_count);
		submit_bio(rw, bio);
	}

completion:
	iocb->ki_left -= nbytes;
	nbytes = iocb->ki_left;
	iocb->ki_pos += nbytes;

	blk_run_address_space(inode->i_mapping);
	if (atomic_dec_and_test(bio_count))
		aio_complete(iocb, nbytes, 0);

	return -EIOCBQUEUED;

backout:
	/*
	 * back out nbytes count constructed so far for this bio,
	 * we will throw away current bio.
	 */
	nbytes += bio->bi_size;
	bio_release_pages(bio);
	bio_put(bio);

	/*
	 * if no bio was submitted, return the error code.
	 * otherwise, proceed with pending I/O completion.
	 */
	if (atomic_read(bio_count) == 1)
		return PTR_ERR(page);
	goto completion;
}
#endif

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, blkdev_get_block);
}

static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_commit_write(page, from, to);
}

/*
 * private llseek:
 * for a block special file file->f_path.dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *bd_inode = file->f_mapping->host;
	loff_t size;
	loff_t retval;

	mutex_lock(&bd_inode->i_mutex);
	size = i_size_read(bd_inode);

	switch (origin) {
		case 2:
			offset += size;
			break;
		case 1:
			offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= size) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
		}
		retval = offset;
	}
	mutex_unlock(&bd_inode->i_mutex);
	return retval;
}

/*
 *	Filp is never NULL; the only case when ->fsync() is called with
 *	NULL first argument is nfsd_sync_dir() and that's not a directory.
 */

static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	return sync_blockdev(I_BDEV(filp->f_mapping->host));
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
	struct bdev_inode *bdi = BDEV_I(inode);

	bdi->bdev.bd_inode_backing_dev_info = NULL;
	kmem_cache_free(bdev_cachep, bdi);
}

static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
	{
		memset(bdev, 0, sizeof(*bdev));
		mutex_init(&bdev->bd_mutex);
		sema_init(&bdev->bd_mount_sem, 1);
		INIT_LIST_HEAD(&bdev->bd_inodes);
		INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
		INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
		inode_init_once(&ei->vfs_inode);
	}
}

static inline void __bd_forget(struct inode *inode)
{
	list_del_init(&inode->i_devices);
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	struct list_head *p;
	spin_lock(&bdev_lock);
	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
		__bd_forget(list_entry(p, struct inode, i_devices));
	}
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.clear_inode = bdev_clear_inode,
};

static int bd_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.get_sb		= bd_get_sb,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *bd_mnt __read_mostly;
struct super_block *blockdev_superblock;

void __init bdev_cache_init(void)
{
	int err;
	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			init_once, NULL);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	err = PTR_ERR(bd_mnt);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody will need really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		inode->i_data.backing_dev_info = &default_backing_dev_info;
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

long nr_blockdev_pages(void)
{
	struct list_head *p;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each(p, &all_bdevs) {
		struct block_device *bdev;
		bdev = list_entry(p, struct block_device, bd_list);
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

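/*
 * Usage sketch (illustrative, not part of this file): bdget() looks up or
 * creates the block_device for a dev_t and returns it with a reference
 * held on its backing inode; each successful bdget() must eventually be
 * balanced by a bdput().
 *
 *	struct block_device *bdev = bdget(dev);
 *	if (!bdev)
 *		return -ENOMEM;
 *	... inspect bdev ...
 *	bdput(bdev);
 */
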
static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		atomic_inc(&bdev->bd_inode->i_count);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional bd_inode->i_count for inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			atomic_inc(&bdev->bd_inode->i_count);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (inode->i_bdev) {
		if (inode->i_sb != blockdev_superblock)
			bdev = inode->i_bdev;
		__bd_forget(inode);
	}
	spin_unlock(&bdev_lock);

	if (bdev)
		iput(bdev->bd_inode);
}

int bd_claim(struct block_device *bdev, void *holder)
{
	int res;
	spin_lock(&bdev_lock);

	/* first decide result */
	if (bdev->bd_holder == holder)
		res = 0;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		res = -EBUSY;	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		res = 0;	 /* is a whole device which isn't held */

	else if (bdev->bd_contains->bd_holder == bd_claim)
		res = 0;	 /* is a partition of a device that is being partitioned */
	else if (bdev->bd_contains->bd_holder != NULL)
		res = -EBUSY;	 /* is a partition of a held device */
	else
		res = 0;	 /* is a partition of an un-held device */

	/* now impose change */
	if (res==0) {
		/* note that for a whole device bd_holders
		 * will be incremented twice, and bd_holder will
		 * be set to bd_claim before being set to holder
		 */
		bdev->bd_contains->bd_holders ++;
		bdev->bd_contains->bd_holder = bd_claim;
		bdev->bd_holders++;
		bdev->bd_holder = holder;
	}
	spin_unlock(&bdev_lock);
	return res;
}

EXPORT_SYMBOL(bd_claim);

void bd_release(struct block_device *bdev)
{
	spin_lock(&bdev_lock);
	if (!--bdev->bd_contains->bd_holders)
		bdev->bd_contains->bd_holder = NULL;
	if (!--bdev->bd_holders)
		bdev->bd_holder = NULL;
	spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);

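/*
 * Usage sketch (illustrative, not part of this file): bd_claim() marks an
 * already-opened device as held by @holder so that other exclusive users
 * get -EBUSY; it does not open the device itself.  The usual pairing, also
 * followed by open_bdev_excl()/close_bdev_excl() further down, is:
 *
 *	err = blkdev_get(bdev, mode, 0);
 *	if (err)
 *		return err;
 *	err = bd_claim(bdev, my_holder);
 *	if (err) {
 *		blkdev_put(bdev);
 *		return err;
 *	}
 *	...
 *	bd_release(bdev);
 *	blkdev_put(bdev);
 */
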
#ifdef CONFIG_SYSFS
/*
 * Functions for bd_claim_by_kobject / bd_release_from_kobject
 *
 *     If a kobject is passed to bd_claim_by_kobject()
 *     and the kobject has a parent directory,
 *     following symlinks are created:
 *        o from the kobject to the claimed bdev
 *        o from "holders" directory of the bdev to the parent of the kobject
 *     bd_release_from_kobject() removes these symlinks.
 *
 *     Example:
 *        If /dev/dm-0 maps to /dev/sda, kobject corresponding to
 *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
 *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 */

static struct kobject *bdev_get_kobj(struct block_device *bdev)
{
	if (bdev->bd_contains != bdev)
		return kobject_get(&bdev->bd_part->kobj);
	else
		return kobject_get(&bdev->bd_disk->kobj);
}

static struct kobject *bdev_get_holder(struct block_device *bdev)
{
	if (bdev->bd_contains != bdev)
		return kobject_get(bdev->bd_part->holder_dir);
	else
		return kobject_get(bdev->bd_disk->holder_dir);
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return 0;
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return;
	sysfs_remove_link(from, kobject_name(to));
}

/*
 * 'struct bd_holder' contains pointers to kobjects symlinked by
 * bd_claim_by_kobject.
 * It's connected to bd_holder_list which is protected by bdev->bd_sem.
 */
struct bd_holder {
	struct list_head list;	/* chain of holders of the bdev */
	int count;		/* references from the holder */
	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
	struct kobject *hdev;	/* e.g. "/block/dm-0" */
	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
	struct kobject *sdev;	/* e.g. "/block/sda" */
};

/*
 * Get references of related kobjects at once.
 * Returns 1 on success. 0 on failure.
 *
 * Should call bd_holder_release_dirs() after successful use.
 */
static int bd_holder_grab_dirs(struct block_device *bdev,
			struct bd_holder *bo)
{
	if (!bdev || !bo)
		return 0;

	bo->sdir = kobject_get(bo->sdir);
	if (!bo->sdir)
		return 0;

	bo->hdev = kobject_get(bo->sdir->parent);
	if (!bo->hdev)
		goto fail_put_sdir;

	bo->sdev = bdev_get_kobj(bdev);
	if (!bo->sdev)
		goto fail_put_hdev;

	bo->hdir = bdev_get_holder(bdev);
	if (!bo->hdir)
		goto fail_put_sdev;

	return 1;

fail_put_sdev:
	kobject_put(bo->sdev);
fail_put_hdev:
	kobject_put(bo->hdev);
fail_put_sdir:
	kobject_put(bo->sdir);

	return 0;
}

/* Put references of related kobjects at once. */
static void bd_holder_release_dirs(struct bd_holder *bo)
{
	kobject_put(bo->hdir);
	kobject_put(bo->sdev);
	kobject_put(bo->hdev);
	kobject_put(bo->sdir);
}

static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
{
	struct bd_holder *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	bo->count = 1;
	bo->sdir = kobj;

	return bo;
}

static void free_bd_holder(struct bd_holder *bo)
{
	kfree(bo);
}

/**
 * find_bd_holder - find matching struct bd_holder from the block device
 *
 * @bdev:	struct block device to be searched
 * @bo:		target struct bd_holder
 *
 * Returns matching entry with @bo in @bdev->bd_holder_list.
 * If found, increment the reference count and return the pointer.
 * If not found, returns NULL.
 */
static struct bd_holder *find_bd_holder(struct block_device *bdev,
					struct bd_holder *bo)
{
	struct bd_holder *tmp;

	list_for_each_entry(tmp, &bdev->bd_holder_list, list)
		if (tmp->sdir == bo->sdir) {
			tmp->count++;
			return tmp;
		}

	return NULL;
}

/**
 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
 *
 * @bdev:	block device to be bd_claimed
 * @bo:		preallocated and initialized by alloc_bd_holder()
 *
 * Add @bo to @bdev->bd_holder_list, create symlinks.
 *
 * Returns 0 if symlinks are created.
 * Returns -ve if something fails.
 */
static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
{
	int ret;

	if (!bo)
		return -EINVAL;

	if (!bd_holder_grab_dirs(bdev, bo))
		return -EBUSY;

	ret = add_symlink(bo->sdir, bo->sdev);
	if (ret == 0) {
		ret = add_symlink(bo->hdir, bo->hdev);
		if (ret)
			del_symlink(bo->sdir, bo->sdev);
	}
	if (ret == 0)
		list_add_tail(&bo->list, &bdev->bd_holder_list);
	return ret;
}

/**
 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
 *
 * @bdev:	block device to be bd_claimed
 * @kobj:	holder's kobject
 *
 * If there is matching entry with @kobj in @bdev->bd_holder_list
 * and no other bd_claim() from the same kobject,
 * remove the struct bd_holder from the list, delete symlinks for it.
 *
 * Returns a pointer to the struct bd_holder when it's removed from the list
 * and ready to be freed.
 * Returns NULL if matching claim isn't found or there is other bd_claim()
 * by the same kobject.
 */
static struct bd_holder *del_bd_holder(struct block_device *bdev,
					struct kobject *kobj)
{
	struct bd_holder *bo;

	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
		if (bo->sdir == kobj) {
			bo->count--;
			BUG_ON(bo->count < 0);
			if (!bo->count) {
				list_del(&bo->list);
				del_symlink(bo->sdir, bo->sdev);
				del_symlink(bo->hdir, bo->hdev);
				bd_holder_release_dirs(bo);
				return bo;
			}
			break;
		}
	}

	return NULL;
}

/**
 * bd_claim_by_kobject - bd_claim() with additional kobject signature
 *
 * @bdev:	block device to be claimed
 * @holder:	holder's signature
 * @kobj:	holder's kobject
 *
 * Do bd_claim() and if it succeeds, create sysfs symlinks between
 * the bdev and the holder's kobject.
 * Use bd_release_from_kobject() when releasing the claimed bdev.
 *
 * Returns 0 on success. (same as bd_claim())
 * Returns errno on failure.
 */
static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
				struct kobject *kobj)
{
	int res;
	struct bd_holder *bo, *found;

	if (!kobj)
		return -EINVAL;

	bo = alloc_bd_holder(kobj);
	if (!bo)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	res = bd_claim(bdev, holder);
	if (res == 0) {
		found = find_bd_holder(bdev, bo);
		if (found == NULL) {
			res = add_bd_holder(bdev, bo);
			if (res)
				bd_release(bdev);
		}
	}

	if (res || found)
		free_bd_holder(bo);
	mutex_unlock(&bdev->bd_mutex);

	return res;
}

/**
 * bd_release_from_kobject - bd_release() with additional kobject signature
 *
 * @bdev:	block device to be released
 * @kobj:	holder's kobject
 *
 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
 */
static void bd_release_from_kobject(struct block_device *bdev,
					struct kobject *kobj)
{
	struct bd_holder *bo;

	if (!kobj)
		return;

	mutex_lock(&bdev->bd_mutex);
	bd_release(bdev);
	if ((bo = del_bd_holder(bdev, kobj)))
		free_bd_holder(bo);
	mutex_unlock(&bdev->bd_mutex);
}

/**
 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
 *
 * @bdev:	block device to be claimed
 * @holder:	holder's signature
 * @disk:	holder's gendisk
 *
 * Call bd_claim_by_kobject() with getting @disk->slave_dir.
 */
int bd_claim_by_disk(struct block_device *bdev, void *holder,
			struct gendisk *disk)
{
	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
}
EXPORT_SYMBOL_GPL(bd_claim_by_disk);

/**
 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
 *
 * @bdev:	block device to be claimed
 * @disk:	holder's gendisk
 *
 * Call bd_release_from_kobject() and put @disk->slave_dir.
 */
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
{
	bd_release_from_kobject(bdev, disk->slave_dir);
	kobject_put(disk->slave_dir);
}
EXPORT_SYMBOL_GPL(bd_release_from_disk);
#endif

/*
 * Tries to open block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode)
{
	struct block_device *bdev = bdget(dev);
	int err = -ENOMEM;
	int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
	if (bdev)
		err = blkdev_get(bdev, mode, flags);
	return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);

/*
 * This routine checks whether removable media has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device_operations * bdops = disk->fops;

	if (!bdops->media_changed)
		return 0;
	if (!bdops->media_changed(bdev->bd_disk))
		return 0;

	if (__invalidate_device(bdev))
		printk("VFS: busy inodes on changed media.\n");

	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	if (bdev->bd_disk->minors > 1)
		bdev->bd_invalidated = 1;
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

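/*
 * Usage sketch (illustrative, not part of this file): removable-media
 * drivers normally call check_disk_change() from their fops->open()
 * method and supply the media_changed()/revalidate_disk() hooks it relies
 * on.  The function below is hypothetical.
 *
 *	static int example_open(struct inode *inode, struct file *filp)
 *	{
 *		struct block_device *bdev = inode->i_bdev;
 *		...
 *		check_disk_change(bdev);
 *		...
 *		return 0;
 *	}
 */
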
void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_hardsect_size(bdev);

	bdev->bd_inode->i_size = size;
	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);

static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
			int for_part);
static int __blkdev_put(struct block_device *bdev, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

static int do_open(struct block_device *bdev, struct file *file, int for_part)
{
	struct module *owner = NULL;
	struct gendisk *disk;
	int ret = -ENXIO;
	int part;

	file->f_mapping = bdev->bd_inode->i_mapping;
	lock_kernel();
	disk = get_gendisk(bdev->bd_dev, &part);
	if (!disk) {
		unlock_kernel();
		bdput(bdev);
		return ret;
	}
	owner = disk->fops->owner;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) {
		bdev->bd_disk = disk;
		bdev->bd_contains = bdev;
		if (!part) {
			struct backing_dev_info *bdi;
			if (disk->fops->open) {
				ret = disk->fops->open(bdev->bd_inode, file);
				if (ret)
					goto out_first;
			}
			if (!bdev->bd_openers) {
				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
				bdi = blk_get_backing_dev_info(bdev);
				if (bdi == NULL)
					bdi = &default_backing_dev_info;
				bdev->bd_inode->i_data.backing_dev_info = bdi;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(disk, bdev);
		} else {
			struct hd_struct *p;
			struct block_device *whole;
			whole = bdget_disk(disk, 0);
			ret = -ENOMEM;
			if (!whole)
				goto out_first;
			BUG_ON(for_part);
			ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1);
			if (ret)
				goto out_first;
			bdev->bd_contains = whole;
			p = disk->part[part - 1];
			bdev->bd_inode->i_data.backing_dev_info =
			   whole->bd_inode->i_data.backing_dev_info;
			if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
				ret = -ENXIO;
				goto out_first;
			}
			kobject_get(&p->kobj);
			bdev->bd_part = p;
			bd_set_size(bdev, (loff_t) p->nr_sects << 9);
		}
	} else {
		put_disk(disk);
		module_put(owner);
		if (bdev->bd_contains == bdev) {
			if (bdev->bd_disk->fops->open) {
				ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
				if (ret)
					goto out;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(bdev->bd_disk, bdev);
		}
	}
	bdev->bd_openers++;
	if (for_part)
		bdev->bd_part_count++;
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	return 0;

out_first:
	bdev->bd_disk = NULL;
	bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, 1);
	bdev->bd_contains = NULL;
	put_disk(disk);
	module_put(owner);
out:
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	if (ret)
		bdput(bdev);
	return ret;
}

static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
			int for_part)
{
	/*
	 * This crockload is due to bad choice of ->open() type.
	 * It will go away.
	 * For now, block device ->open() routine must _not_
	 * examine anything in 'inode' argument except ->i_rdev.
	 */
	struct file fake_file = {};
	struct dentry fake_dentry = {};
	fake_file.f_mode = mode;
	fake_file.f_flags = flags;
	fake_file.f_path.dentry = &fake_dentry;
	fake_dentry.d_inode = bdev->bd_inode;

	return do_open(bdev, &fake_file, for_part);
}

int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
{
	return __blkdev_get(bdev, mode, flags, 0);
}
EXPORT_SYMBOL(blkdev_get);

static int blkdev_open(struct inode * inode, struct file * filp)
{
	struct block_device *bdev;
	int res;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	res = do_open(bdev, filp, 0);
	if (res)
		return res;

	if (!(filp->f_flags & O_EXCL) )
		return 0;

	if (!(res = bd_claim(bdev, filp)))
		return 0;

	blkdev_put(bdev);
	return res;
}

static int __blkdev_put(struct block_device *bdev, int for_part)
{
	int ret = 0;
	struct inode *bd_inode = bdev->bd_inode;
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	lock_kernel();
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		sync_blockdev(bdev);
		kill_bdev(bdev);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			ret = disk->fops->release(bd_inode, NULL);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		put_disk(disk);
		module_put(owner);

		if (bdev->bd_contains != bdev) {
			kobject_put(&bdev->bd_part->kobj);
			bdev->bd_part = NULL;
		}
		bdev->bd_disk = NULL;
		bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;
	}
	unlock_kernel();
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, 1);
	return ret;
}

int blkdev_put(struct block_device *bdev)
{
	return __blkdev_put(bdev, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode * inode, struct file * filp)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	if (bdev->bd_holder == filp)
		bd_release(bdev);
	return blkdev_put(bdev);
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}

const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.writepage	= blkdev_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= blkdev_prepare_write,
	.commit_write	= blkdev_commit_write,
	.writepages	= generic_writepages,
	.direct_IO	= blkdev_direct_IO,
};

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write_nolock,
	.mmap		= generic_file_mmap,
	.fsync		= block_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.sendfile	= generic_file_sendfile,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
	set_fs(old_fs);
	return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

/**
 * lookup_bdev  - lookup a struct block_device by name
 *
 * @path:	special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
	struct block_device *bdev;
	struct inode *inode;
	struct nameidata nd;
	int error;

	if (!path || !*path)
		return ERR_PTR(-EINVAL);

	error = path_lookup(path, LOOKUP_FOLLOW, &nd);
	if (error)
		return ERR_PTR(error);

	inode = nd.dentry->d_inode;
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (nd.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_release(&nd);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}

/**
 * open_bdev_excl  -  open a block device by name and set it up for use
 *
 * @path:	special file representing the block device
 * @flags:	%MS_RDONLY for opening read-only
 * @holder:	owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
{
	struct block_device *bdev;
	mode_t mode = FMODE_READ;
	int error = 0;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;
	error = blkdev_get(bdev, mode, 0);
	if (error)
		return ERR_PTR(error);
	error = -EACCES;
	if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
		goto blkdev_put;
	error = bd_claim(bdev, holder);
	if (error)
		goto blkdev_put;

	return bdev;

blkdev_put:
	blkdev_put(bdev);
	return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);

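/*
 * Usage sketch (illustrative, not part of this file): open_bdev_excl() is
 * what the generic mount path uses to go from a device path to an opened,
 * claimed block device; get_sb_bdev() in fs/super.c does roughly:
 *
 *	bdev = open_bdev_excl(dev_name, flags, fs_type);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	close_bdev_excl(bdev);		(on failure, or later at umount time)
 */
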
/**
 * close_bdev_excl  -  release a blockdevice opened by open_bdev_excl()
 *
 * @bdev:	blockdevice to close
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev)
{
	bd_release(bdev);
	blkdev_put(bdev);
}

EXPORT_SYMBOL(close_bdev_excl);

int __invalidate_device(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb);
		drop_super(sb);
	}
	invalidate_bdev(bdev, 0);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);