/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
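
/* Per-device state, kept in the net_device's private area. */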
struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;
	struct net_device *dev;
	struct napi_struct napi;

	/* The skb we couldn't send because buffers were full. */
	struct sk_buff *last_xmit_skb;

	/* If we need to free in a timer, this is it. */
	struct timer_list xmit_free_timer;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* For cleaning up after transmission. */
	struct tasklet_struct tasklet;
	bool free_in_tasklet;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Chain pages by the private ptr. */
	struct page *pages;
};
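
/* The virtio_net header for an skb is stashed in the skb's control buffer
 * (skb->cb) while the driver owns the skb. */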
static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr *)skb->cb;
}
static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
{
	sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
}
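
/* Spare pages for big-packet receive buffers are chained through
 * page->private: give_a_page() returns a page to the pool, get_a_page()
 * takes one from it (or allocates a fresh page). */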
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}
static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
	skb_shinfo(skb)->nr_frags = 0;
}
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}
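
/* Callback from the virtio core: the host has consumed buffers on the send
 * virtqueue, so transmitted skbs can be reclaimed. */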
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);

	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
	 * queued, start_xmit won't be called. */
	tasklet_schedule(&vi->tasklet);
}
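
/* Handle one packet handed back by the host: validate it, apply checksum and
 * GSO metadata from the virtio_net header, and pass it up the stack. */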
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	int err;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}
	len -= sizeof(struct virtio_net_hdr);

	if (len <= MAX_PACKET_LEN)
		trim_pages(netdev_priv(dev), skb);

	err = pskb_trim(skb, len);
	if (err) {
		pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
		dev->stats.rx_dropped++;
		goto drop;
	}
	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			printk(KERN_WARNING "%s: bad gso type %u.\n",
			       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			printk(KERN_WARNING "%s: zero gso size.\n",
			       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}
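
/* Post receive buffers to the host: allocate skbs (plus extra pages when big
 * packets are negotiated), queue them on vi->recv and expose them on the
 * receive virtqueue. */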
static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	int num, err, i;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);
	for (;;) {
		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);
		vnet_hdr_to_sg(sg, skb);

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, GFP_ATOMIC);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__netif_rx_schedule(vi->dev, &vi->napi);
	}
}
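
/* NAPI poll routine: drain used receive buffers from the virtqueue, refill it
 * if it is running low, and re-enable interrupts once the budget is no longer
 * exhausted. */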
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__netif_rx_schedule(vi->dev, napi);
			goto again;
		}
	}

	return received;
}
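
/* Reclaim transmitted skbs that the host has finished with, updating the
 * transmit statistics as we go. */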
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}
/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}
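
/* Build the virtio_net header for an outgoing skb, gather the header and the
 * skb data into a scatterlist, and add it to the send virtqueue.  Returns
 * nonzero if the buffer could not be added (e.g. the queue is full). */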
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num, err;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (!err && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	return err;
}
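
/* Bottom half scheduled from skb_xmit_done: retry the deferred last_xmit_skb
 * and, when the transport notifies on empty, reclaim sent skbs here rather
 * than via the timer. */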
static void xmit_tasklet(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock_bh(vi->dev);
	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
		vi->svq->vq_ops->kick(vi->svq);
		vi->last_xmit_skb = NULL;
	}
	if (vi->free_in_tasklet)
		free_old_xmit_skbs(vi);
	netif_tx_unlock_bh(vi->dev);
}
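
/* Transmit entry point: at most one skb is kept deferred in last_xmit_skb
 * when the virtqueue is full, and the queue is stopped until the host makes
 * room again. */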
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* If we have a buffer left over from last time, send it now. */
	if (unlikely(vi->last_xmit_skb) &&
	    xmit_skb(vi, vi->last_xmit_skb) != 0)
		goto stop_queue;

	vi->last_xmit_skb = NULL;

	/* Put new one in send queue and do transmit */
	if (likely(skb)) {
		__skb_queue_head(&vi->send, skb);
		if (xmit_skb(vi, skb) != 0) {
			vi->last_xmit_skb = skb;
			skb = NULL;
		}
	}
done:
	vi->svq->vq_ops->kick(vi->svq);
	return NETDEV_TX_OK;

stop_queue:
	pr_debug("%s: virtio not prepared to send\n", dev->name);
	netif_stop_queue(dev);

	/* Activate callback for using skbs: if this returns false it
	 * means some were used in the meantime. */
	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
		vi->svq->vq_ops->disable_cb(vi->svq);
		netif_start_queue(dev);
		goto again;
	}

	/* Drop this skb: we only queue one. */
	vi->dev->stats.tx_dropped++;
	kfree_skb(skb);
	goto done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (netif_rx_schedule_prep(dev, &vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__netif_rx_schedule(dev, &vi->napi);
	}
	return 0;
}
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}
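
/* ethtool: only allow TX checksum offload to be enabled when the host
 * advertised VIRTIO_NET_F_CSUM. */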
static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}
static struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
};
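
/* Device probe: allocate the net_device, negotiate offload features with the
 * host, locate the receive and send virtqueues, register the device and prime
 * the receive queue with buffers. */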
static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->open = virtnet_open;
	dev->stop = virtnet_close;
	dev->hard_start_xmit = start_xmit;
	dev->features = NETIF_F_HIGHDMA;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = virtnet_netpoll;
#endif
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;

	/* If they give us a callback when all buffers are done, we don't need
	 * the timer. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	/* We expect two virtqueues, receive then send. */
	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
	if (IS_ERR(vi->rvq)) {
		err = PTR_ERR(vi->rvq);
		goto free;
	}

	vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
	if (IS_ERR(vi->svq)) {
		err = PTR_ERR(vi->svq);
		goto free_recv;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_send;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
free_send:
	vdev->config->del_vq(vi->svq);
free_recv:
	vdev->config->del_vq(vi->rvq);
free:
	free_netdev(dev);
	return err;
}
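
/* Device teardown: reset the device, reclaim all queued skbs and spare pages,
 * tear down the virtqueues and free the net_device. */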
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	vdev->config->del_vq(vi->svq);
	vdev->config->del_vq(vi->rvq);
	unregister_netdev(vi->dev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
	VIRTIO_F_NOTIFY_ON_EMPTY,
};
static struct virtio_driver virtio_net = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
};
static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");