/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

struct nbd_device {
	u32 flags;
	struct socket *sock;	/* If == NULL, device is not ready, yet */
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	int xmit_timeout;
	bool disconnect; /* a disconnect has been requested by user */

	struct timer_list timeout_timer;
	spinlock_t tasks_lock;
	struct task_struct *task_recv;
	struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

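/*
 * Complete a request back to the block layer, folding any nbd-level error
 * into -EIO. Takes q->queue_lock itself, since __blk_end_request_all()
 * must be called with the queue lock held.
 */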
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	if (!nbd->sock)
		return;

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	nbd->sock = NULL;
	del_timer_sync(&nbd->timeout_timer);
}

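/*
 * Timer callback, fired when no reply has arrived within the transmit
 * timeout while requests are still outstanding. Marks the device for
 * disconnect and SIGKILLs the receiver and sender tasks so their blocking
 * socket calls bail out.
 */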
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	nbd->disconnect = true;

	spin_lock_irqsave(&nbd->tasks_lock, flags);

	if (nbd->task_recv)
		force_sig(SIGKILL, nbd->task_recv);

	if (nbd->task_send)
		force_sig(SIGKILL, nbd->task_send);

	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);
	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req->cmd_flags & REQ_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
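	/*
	 * The on-wire handle is opaque to the server: stash the request
	 * pointer itself in it. The server echoes the handle back verbatim,
	 * and nbd_read_stat() uses it to find this request again.
	 */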
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}

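/*
 * Locate the in-flight request that a reply's handle refers to. Waits
 * until the request is no longer being transmitted (active_req), then
 * unlinks it from queue_head; returns ERR_PTR(-ENOENT) if nothing matches.
 */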
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}

static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			   MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* An ERR_PTR return means something went wrong; inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			return ERR_PTR(result);

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		return ERR_PTR(-EBADR);
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

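/*
 * Receiver loop, run in the context of the process that invoked NBD_DO_IT.
 * Publishes the receiver pid via sysfs, then completes requests as their
 * replies arrive, until the connection dies or a signal (e.g. from the
 * timeout timer) interrupts the socket read.
 */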
static int nbd_thread_recv(struct nbd_device *nbd)
{
	struct request *req;
	int ret;
	unsigned long flags;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_recv = current;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");

		spin_lock_irqsave(&nbd->tasks_lock, flags);
		nbd->task_recv = NULL;
		spin_unlock_irqrestore(&nbd->tasks_lock, flags);

		return ret;
	}

	while (1) {
		req = nbd_read_stat(nbd);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		nbd_end_request(nbd, req);
	}

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_recv = NULL;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	if (signal_pending(current)) {
		ret = kernel_dequeue_signal(NULL);
		dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
			 task_pid_nr(current), current->comm, ret);
		mutex_lock(&nbd->tx_lock);
		sock_shutdown(nbd);
		mutex_unlock(&nbd->tx_lock);
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

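/*
 * Transmit one queued request. Runs in the sender thread under tx_lock so
 * the control header and any payload hit the socket back-to-back, then
 * parks the request on queue_head to await its reply.
 */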
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}

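/*
 * Sender kthread: pulls requests off waiting_queue as the request function
 * queues them and pushes them onto the wire via nbd_handle_req(), until
 * asked to stop or interrupted by a signal.
 */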
static int nbd_thread_send(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;
	unsigned long flags;

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_send = current;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		if (signal_pending(current)) {
			int ret = kernel_dequeue_signal(NULL);

			dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
				 task_pid_nr(current), current->comm, ret);
			mutex_lock(&nbd->tx_lock);
			sock_shutdown(nbd);
			mutex_unlock(&nbd->tx_lock);
			break;
		}

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}

	spin_lock_irqsave(&nbd->tasks_lock, flags);
	nbd->task_send = NULL;
	spin_unlock_irqrestore(&nbd->tasks_lock, flags);

	/* Clear maybe pending signals */
	if (signal_pending(current))
		kernel_dequeue_signal(NULL);

	return 0;
}

/*
 * We always wait for the result of a write, for now. It would be nice to
 * make this optional in the future, e.g.:
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *	{ printk("Warning: Ignoring result!\n"); nbd_end_request(req); }
 */

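/*
 * Block-layer request function. Called with q->queue_lock held; drops the
 * lock while parking each request on waiting_queue and waking the sender
 * thread, which performs the actual network I/O.
 */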
static void nbd_request_handler(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (cmd_type=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back. */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = true;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK: {
		struct socket *sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		if (sock)
			sockfd_put(sock);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct socket *sock;
		int err;
		if (nbd->sock)
			return -EBUSY;
		sock = sockfd_lookup(arg, &err);
		if (sock) {
			nbd->sock = sock;
			if (max_part > 0)
				bdev->bd_invalidated = 1;
			nbd->disconnect = false; /* we're connected now */
			return 0;
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

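	/* arg is the timeout in seconds; the timer itself runs on jiffies */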
	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		if (arg)
			mod_timer(&nbd->timeout_timer,
				  jiffies + nbd->xmit_timeout);
		else
			del_timer_sync(&nbd->timeout_timer);

		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

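	/*
	 * NBD_DO_IT does not return until the session ends: it starts the
	 * sender kthread, runs the receiver loop in the calling process
	 * itself (with tx_lock dropped for the duration), then tears the
	 * device back down once the connection is gone.
	 */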
	case NBD_DO_IT: {
		struct task_struct *thread;
		struct socket *sock;
		int error;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		if (nbd->flags & NBD_FLAG_READ_ONLY)
			set_device_ro(bdev, true);
		if (nbd->flags & NBD_FLAG_SEND_TRIM)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
				nbd->disk->queue);
		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
		else
			blk_queue_flush(nbd->disk->queue, 0);

		thread = kthread_run(nbd_thread_send, nbd, "%s",
				     nbd_name(nbd));
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		nbd_dev_dbg_init(nbd);
		error = nbd_thread_recv(nbd);
		nbd_dev_dbg_close(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);

		sock_shutdown(nbd);
		sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
		set_device_ro(bdev, false);
		if (sock)
			sockfd_put(sock);
		nbd->flags = 0;
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			blkdev_reread_part(bdev);
		if (nbd->disconnect) /* user requested, ignore socket errors */
			return 0;
		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
	if (nbd->task_send)
		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct dentry *f;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (IS_ERR_OR_NULL(dir)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
			nbd_name(nbd), PTR_ERR(dir));
		return PTR_ERR(dir);
	}
	nbd->dbg_dir = dir;

	f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	f = debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (IS_ERR(dbg_dir))
		return PTR_ERR(dbg_dir);

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

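	/*
	 * Worked example for the checks below: max_part=15 gives
	 * part_shift = fls(15) = 4, so each device spans 1 << 4 = 16 minors
	 * (the whole-disk node plus up to 15 partitions) and at most
	 * 1 << (MINORBITS - 4) devices can be created.
	 */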
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
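		/*
		 * Accept requests of up to 65536 sectors (32 MiB) from the
		 * block layer, but keep the default request size at 256
		 * sectors (128 KiB).
		 */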
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		spin_lock_init(&nbd_dev[i].tasks_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_timer(&nbd_dev[i].timeout_timer);
		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");