/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
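/*
 * Request flow, as implemented below: do_nbd_request() moves requests
 * from the block layer queue onto waiting_queue; the per-device
 * nbd_thread() sends each one to the server and parks it on queue_head;
 * nbd_do_it(), running in the context of the NBD_DO_IT ioctl (i.e. the
 * nbd-client process), receives replies and completes the matching
 * request.
 */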
struct nbd_device {
	int flags;
	int harderror;		/* Code of hard error */
	struct socket *sock;	/* If == NULL, device is not ready, yet	*/
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	pid_t pid; /* pid of nbd-client, if attached */
	int xmit_timeout;
	int disconnect; /* a disconnect has been requested by user */
};
#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
	if (lock)
		mutex_lock(&nbd->tx_lock);
	if (nbd->sock) {
		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
		nbd->sock = NULL;
	}
	if (lock)
		mutex_unlock(&nbd->tx_lock);
}
static void nbd_xmit_timeout(unsigned long arg)
{
	struct task_struct *task = (struct task_struct *)arg;

	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
		task->comm, task->pid);
	force_sig(SIGKILL, task);
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (nbd->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + nbd->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (nbd->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(nbd, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}
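/*
 * The bvec helpers below kmap() each page before handing it to
 * sock_xmit(), since bio pages may live in highmem and have no
 * permanent kernel mapping.
 */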
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));

	if (nbd_cmd(req) != NBD_CMD_FLUSH && nbd_cmd(req) != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(nbd_cmd(req)),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			   MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
harderror:
	nbd->harderror = result;
	return NULL;
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};
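/*
 * Receive loop, run from the NBD_DO_IT ioctl in the context of the
 * nbd-client process. While it runs, the client's pid is published as
 * the "pid" sysfs attribute of the disk (/sys/block/nbd<n>/pid).
 */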
static int nbd_do_it(struct nbd_device *nbd)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);
	nbd->pid = task_pid_nr(current);
	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		nbd->pid = 0;
		return ret;
	}

	while ((req = nbd_read_stat(nbd)) != NULL)
		nbd_end_request(nbd, req);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
	nbd->pid = 0;
	return 0;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
}
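/*
 * Map a block layer request onto an NBD command and send it: READ/WRITE
 * for ordinary transfers, TRIM for discards, FLUSH for empty flush
 * requests. A write that fails the read-only check is errored locally.
 */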
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	nbd_cmd(req) = NBD_CMD_READ;
	if (rq_data_dir(req) == WRITE) {
		if ((req->cmd_flags & REQ_DISCARD)) {
			WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM));
			nbd_cmd(req) = NBD_CMD_TRIM;
		} else
			nbd_cmd(req) = NBD_CMD_WRITE;
		if (nbd->flags & NBD_FLAG_READ_ONLY) {
			dev_err(disk_to_dev(nbd->disk),
				"Write on read-only\n");
			goto error_out;
		}
	}

	if (req->cmd_flags & REQ_FLUSH) {
		BUG_ON(unlikely(blk_rq_sectors(req)));
		nbd_cmd(req) = NBD_CMD_FLUSH;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}
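/*
 * Per-device sender thread, started by NBD_DO_IT and named after the
 * disk; it drains waiting_queue and pushes each request to the server
 * via nbd_handle_req().
 */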
static int nbd_thread(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}
	return 0;
}
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */
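/*
 * Request function, called by the block layer with q->queue_lock held;
 * the lock is dropped while each request is handed over (hence the
 * __releases/__acquires annotations below). The real network I/O is
 * done by nbd_thread(), so this only queues the request and wakes the
 * sender.
 */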
static void do_nbd_request(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
/* Must be called with tx_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;

		/* Check again after getting mutex back. */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = 1;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK: {
		struct socket *sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		if (sock)
			sockfd_put(sock);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct socket *sock;
		int err;
		if (nbd->sock)
			return -EBUSY;
		sock = sockfd_lookup(arg, &err);
		if (sock) {
			nbd->sock = sock;
			if (max_part > 0)
				bdev->bd_invalidated = 1;
			nbd->disconnect = 0; /* we're connected now */
			return 0;
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		struct socket *sock;
		int error;

		if (nbd->pid)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		if (nbd->flags & NBD_FLAG_READ_ONLY)
			set_device_ro(bdev, true);
		if (nbd->flags & NBD_FLAG_SEND_TRIM)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
				nbd->disk->queue);
		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
		else
			blk_queue_flush(nbd->disk->queue, 0);

		thread = kthread_run(nbd_thread, nbd, "%s",
				     nbd->disk->disk_name);
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		error = nbd_do_it(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);
		if (error)
			return error;
		sock_shutdown(nbd, 0);
		sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
		kill_bdev(bdev);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
		set_device_ro(bdev, false);
		if (sock)
			sockfd_put(sock);
		nbd->flags = 0;
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			ioctl_by_bdev(bdev, BLKRRPART, 0);
		if (nbd->disconnect) /* user requested, ignore socket errors */
			return 0;
		return nbd->harderror;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}
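/*
 * For reference, a minimal userspace sequence that drives the ioctls
 * above, in the style of nbd-client. This is an illustrative sketch,
 * not part of the driver; "sock" is assumed to be an already-connected
 * TCP socket fd, and the device path and block count are examples:
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_SOCK, sock);
 *	ioctl(dev, NBD_DO_IT);	-- blocks until disconnect or error
 */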
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		disk->queue->limits.max_discard_sectors = UINT_MAX;
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");