/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

#include <net/route.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
			    NETIF_F_SG | \
			    NETIF_F_TSO | \
			    NETIF_F_TSO6 | \
			    NETIF_F_HW_CSUM)
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;
static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
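/* Runs from the per-device work item: program the RNDIS packet filter on
 * the host to match the current interface flags (promiscuous vs.
 * broadcast + all-multicast + directed).
 */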
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (!rdev)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}
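/* .ndo_set_rx_mode callback. The RNDIS filter update sends a request to
 * the host and may sleep, so it is deferred to the work item above.
 */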
static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}
static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);
			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}
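/* Select the transmit queue from the host-supplied indirection table
 * (send_table), indexed by the skb flow hash; fall back to queue 0 when
 * the chosen subchannel is not open.
 */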
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}
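/* The host is handed the packet as a list of hv_page_buffer entries, each
 * naming one guest page by PFN plus an offset and length within it.
 * fill_pg_buf() converts a (page, offset, len) range into such entries;
 * init_page_array() lays out the RNDIS header, linear data and frags.
 */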
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}

	return slots_used;
}
static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}

	return pages;
}
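/* Number of page buffer slots needed to describe the whole skb: the
 * linear area plus every fragment, each rounded up to page ranges.
 */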
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
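/* Classify the packet (IPv4/IPv6, TCP/UDP) and report the transport
 * header offset; used when building the checksum and LSO PPIs.
 */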
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}
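/* Transmit path: build the RNDIS packet message and any per-packet info
 * (hash, VLAN, LSO or checksum) in the skb headroom, describe the whole
 * frame as page buffers, and hand it to netvsc_send(). -EAGAIN from the
 * ring buffer is reported to the stack as NETDEV_TX_BUSY.
 */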
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	u32 hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (net_trans_info & (INFO_IPV4 << 16)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
		goto do_send;
	}

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
							 ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto no_memory;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0)) {
		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
		return NETDEV_TX_OK;
	}

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate + indicate->
				 status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}
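/* Allocate an skb and copy the received frame out of the host receive
 * buffer (which cannot be kept), then apply checksum status and VLAN tag.
 */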
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
				struct hv_netvsc_packet *packet,
				struct ndis_tcp_ip_checksum_info *csum_info,
				void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* We only look at the IP checksum here.
	 * Should we be dropping the packet if checksum
	 * failed? How do we deal with other checksums - TCP/UDP?
	 */
	if (csum_info->receive.ip_checksum_succeeded)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}
/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet,
				void **data,
				struct ndis_tcp_ip_checksum_info *csum_info,
				struct vmbus_channel *channel,
				u16 vlan_tci)
{
	struct net_device *net = hv_get_drvdata(device_obj);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct sk_buff *skb;
	struct sk_buff *vf_skb;
	struct netvsc_stats *rx_stats;
	u32 bytes_recvd = packet->total_data_buflen;
	int ret = 0;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	if (READ_ONCE(net_device_ctx->vf_inject)) {
		atomic_inc(&net_device_ctx->vf_use_cnt);
		if (!READ_ONCE(net_device_ctx->vf_inject)) {
			/*
			 * We raced; just move on.
			 */
			atomic_dec(&net_device_ctx->vf_use_cnt);
			goto vf_injection_done;
		}

		/*
		 * Inject this packet into the VF interface.
		 * On Hyper-V, multicast and broadcast packets
		 * are only delivered on the synthetic interface
		 * (after subjecting these to policy filters on
		 * the host). Deliver these via the VF interface
		 * in the guest.
		 */
		vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
					       packet, csum_info, *data,
					       vlan_tci);
		if (vf_skb != NULL) {
			++net_device_ctx->vf_netdev->stats.rx_packets;
			net_device_ctx->vf_netdev->stats.rx_bytes +=
				bytes_recvd;
			netif_receive_skb(vf_skb);
		} else {
			++net->stats.rx_dropped;
			ret = NVSP_STAT_FAIL;
		}
		atomic_dec(&net_device_ctx->vf_use_cnt);
		return ret;
	}

vf_injection_done:
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}
	skb_record_rx_queue(skb, channel->
			    offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 */
	netif_rx(skb);

	return 0;
}
static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

do_set:
	net_device_ctx->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = net_device_ctx->nvdev;

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

out:
	netvsc_open(net);
	net_device_ctx->start_remove = false;
	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;

recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}
static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}
static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}
static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}
static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	u32 num_chn;
	int ret = 0;

	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	num_chn = nvdev->num_chn;

	ndevctx->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	ndevctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes	+= tx_bytes;
		t->tx_packets	+= tx_packets;
		t->rx_bytes	+= rx_bytes;
		t->rx_packets	+= rx_packets;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_dropped;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;

	return t;
}
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
};
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(netvsc_stats);
	default:
		return -EINVAL;
	}
}
static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	const void *nds = &ndc->eth_stats;
	int i;

	for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       netvsc_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};
/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	if (ndev_ctx->start_remove)
		goto out_unlock;

	net_device = ndev_ctx->nvdev;
	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}
static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}
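/* A VF and its paired synthetic NIC share the same MAC address, so the
 * synthetic device is found by scanning for a netdev with that MAC whose
 * netdev_ops are ours.
 */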
static struct net_device *get_netvsc_net_device(char *mac)
{
	struct net_device *dev, *found = NULL;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
			if (dev->netdev_ops != &device_ops)
				continue;
			found = dev;
			break;
		}
	}

	return found;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == NULL || eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || net_device_ctx->vf_netdev)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);
	net_device_ctx->vf_netdev = vf_netdev;
	return NOTIFY_OK;
}
static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
{
	net_device_ctx->vf_inject = true;
}

static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
{
	net_device_ctx->vf_inject = false;

	/* Wait for currently active users to drain out. */
	while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
		udelay(50);
}
static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
	struct net_device_context *net_device_ctx;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	if (!netvsc_dev || !net_device_ctx->vf_netdev)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
	netvsc_inject_enable(net_device_ctx);

	/*
	 * Open the device before switching data path.
	 */
	rndis_filter_open(netvsc_dev);

	/*
	 * notify the host to switch the data path.
	 */
	netvsc_switch_datapath(ndev, true);
	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

	netif_carrier_off(ndev);

	/* Now notify peers through VF device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

	return NOTIFY_OK;
}
static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	if (!netvsc_dev || !net_device_ctx->vf_netdev)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_inject_disable(net_device_ctx);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);

	/* Now notify peers through netvsc device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);

	return NOTIFY_OK;
}
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
	struct net_device_context *net_device_ctx;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || !net_device_ctx->vf_netdev)
		return NOTIFY_DONE;
	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
	netvsc_inject_disable(net_device_ctx);
	net_device_ctx->vf_netdev = NULL;
	module_put(THIS_MODULE);
	return NOTIFY_OK;
}
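/* Probe: allocate a multi-queue ethernet device, set up per-CPU stats,
 * the link-change work and VF state, add the RNDIS filter device (which
 * negotiates with the host and returns the MAC address), then register
 * the netdev.
 */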
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);

	net_device_ctx->start_remove = false;

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	atomic_set(&net_device_ctx->vf_use_cnt, 0);
	net_device_ctx->vf_netdev = NULL;
	net_device_ctx->vf_inject = false;

	net->netdev_ops = &device_ops;

	net->hw_features = NETVSC_HW_FEATURES;
	net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	}

	return ret;
}
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);
	net_device = ndev_ctx->nvdev;

	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
	 * removing the device.
	 */
	rtnl_lock();
	ndev_ctx->start_remove = true;
	rtnl_unlock();

	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	hv_set_drvdata(dev, NULL);

	netvsc_free_netdev(net);
	return 0;
}
static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};
/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Avoid Vlan dev with same MAC registering as VF */
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if (event_dev->priv_flags & IFF_BONDING &&
	    event_dev->flags & IFF_MASTER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}
static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);