/* drivers/net/hyperv/netvsc_drv.c */
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
                            NETIF_F_SG | \
                            NETIF_F_TSO | \
                            NETIF_F_TSO6 | \
                            NETIF_F_HW_CSUM)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                               NETIF_MSG_LINK | NETIF_MSG_IFUP |
                               NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
                               NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

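/* Deferred to a workqueue: ndo_set_rx_mode is called with the netdev
 * addr_list lock held and must not sleep, while the RNDIS set-filter
 * request below blocks until the host completes it.
 */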
static void do_set_multicast(struct work_struct *w)
{
        struct net_device_context *ndevctx =
                container_of(w, struct net_device_context, work);
        struct hv_device *device_obj = ndevctx->device_ctx;
        struct net_device *ndev = hv_get_drvdata(device_obj);
        struct netvsc_device *nvdev = ndevctx->nvdev;
        struct rndis_device *rdev;

        if (!nvdev)
                return;

        rdev = nvdev->extension;
        if (rdev == NULL)
                return;

        if (ndev->flags & IFF_PROMISC)
                rndis_filter_set_packet_filter(rdev,
                        NDIS_PACKET_TYPE_PROMISCUOUS);
        else
                rndis_filter_set_packet_filter(rdev,
                        NDIS_PACKET_TYPE_BROADCAST |
                        NDIS_PACKET_TYPE_ALL_MULTICAST |
                        NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);

        schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
        struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
        struct rndis_device *rdev;
        int ret = 0;

        netif_carrier_off(net);

        /* Open up the device */
        ret = rndis_filter_open(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to open device (ret %d).\n", ret);
                return ret;
        }

        netif_tx_wake_all_queues(net);

        rdev = nvdev->extension;
        if (!rdev->link_state)
                netif_carrier_on(net);

        return ret;
}

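/* Closing waits for the VMBus ring buffers to drain, backing off
 * exponentially from 10ms up to 1s and giving up after retry_max
 * attempts; leftover data is reported as -ETIMEDOUT.
 */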
static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = net_device_ctx->nvdev;
        int ret;
        u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
        struct vmbus_channel *chn;

        netif_tx_disable(net);

        /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
        cancel_work_sync(&net_device_ctx->work);
        ret = rndis_filter_close(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to close device (ret %d).\n", ret);
                return ret;
        }

        /* Ensure pending bytes in ring are read */
        while (true) {
                aread = 0;
                for (i = 0; i < nvdev->num_chn; i++) {
                        chn = nvdev->chn_table[i];
                        if (!chn)
                                continue;

                        hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
                                                     &awrite);

                        if (aread)
                                break;

                        hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
                                                     &awrite);

                        if (aread)
                                break;
                }

                retry++;
                if (retry > retry_max || aread == 0)
                        break;

                msleep(msec);

                if (msec < 1000)
                        msec *= 2;
        }

        if (aread) {
                netdev_err(net, "Ring buffer not empty after closing rndis\n");
                ret = -ETIMEDOUT;
        }

        return ret;
}

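/* Reserve and initialize a per-packet info (PPI) field after the RNDIS
 * packet header; the caller fills in the type-specific payload at
 * ppi->ppi_offset.
 */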
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
                           int pkt_type)
{
        struct rndis_packet *rndis_pkt;
        struct rndis_per_packet_info *ppi;

        rndis_pkt = &msg->msg.pkt;
        rndis_pkt->data_offset += ppi_size;

        ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
                rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

        ppi->size = ppi_size;
        ppi->type = pkt_type;
        ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

        rndis_pkt->per_pkt_info_len += ppi_size;

        return ppi;
}

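/* Map the skb flow hash through the host-supplied indirection table
 * (send_table) to pick a transmit queue, falling back to queue 0 when
 * the chosen subchannel is not open.
 */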
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                               void *accel_priv, select_queue_fallback_t fallback)
{
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
        u32 hash;
        u16 q_idx = 0;

        if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
                return 0;

        hash = skb_get_hash(skb);
        q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
                ndev->real_num_tx_queues;

        if (!nvsc_dev->chn_table[q_idx])
                q_idx = 0;

        return q_idx;
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
                       struct hv_page_buffer *pb)
{
        int j = 0;

        /* Deal with compound pages by ignoring unused part
         * of the page.
         */
        page += (offset >> PAGE_SHIFT);
        offset &= ~PAGE_MASK;

        while (len > 0) {
                unsigned long bytes;

                bytes = PAGE_SIZE - offset;
                if (bytes > len)
                        bytes = len;
                pb[j].pfn = page_to_pfn(page);
                pb[j].offset = offset;
                pb[j].len = bytes;

                offset += bytes;
                len -= bytes;

                if (offset == PAGE_SIZE && len) {
                        page++;
                        offset = 0;
                        j++;
                }
        }

        return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                           struct hv_netvsc_packet *packet,
                           struct hv_page_buffer **page_buf)
{
        struct hv_page_buffer *pb = *page_buf;
        u32 slots_used = 0;
        char *data = skb->data;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;

        /* The packet is laid out thus:
         * 1. hdr: RNDIS header and PPI
         * 2. skb linear data
         * 3. skb fragment data
         */
        if (hdr != NULL)
                slots_used += fill_pg_buf(virt_to_page(hdr),
                                          offset_in_page(hdr),
                                          len, &pb[slots_used]);

        packet->rmsg_size = len;
        packet->rmsg_pgcnt = slots_used;

        slots_used += fill_pg_buf(virt_to_page(data),
                                  offset_in_page(data),
                                  skb_headlen(skb), &pb[slots_used]);

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;

                slots_used += fill_pg_buf(skb_frag_page(frag),
                                          frag->page_offset,
                                          skb_frag_size(frag), &pb[slots_used]);
        }
        return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int pages = 0;

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = frag->page_offset;

                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;
                pages += PFN_UP(offset + size);
        }
        return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
        char *data = skb->data;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        int slots;
        int frag_slots;

        slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
        frag_slots = count_skb_frag_slots(skb);
        return slots + frag_slots;
}

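/* Classify the frame as IPv4/IPv6 TCP or UDP (or not IP at all) and
 * report the transport header offset, which both the checksum and LSO
 * per-packet info fields need.
 */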
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
        u32 ret_val = TRANSPORT_INFO_NOT_IP;

        if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
            (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
                goto not_ip;
        }

        *trans_off = skb_transport_offset(skb);

        if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
                struct iphdr *iphdr = ip_hdr(skb);

                if (iphdr->protocol == IPPROTO_TCP)
                        ret_val = TRANSPORT_INFO_IPV4_TCP;
                else if (iphdr->protocol == IPPROTO_UDP)
                        ret_val = TRANSPORT_INFO_IPV4_UDP;
        } else {
                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                        ret_val = TRANSPORT_INFO_IPV6_TCP;
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        ret_val = TRANSPORT_INFO_IPV6_UDP;
        }

not_ip:
        return ret_val;
}

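/* Transmit path: the RNDIS message is built in the skb headroom
 * (reserved via net->needed_headroom), described by an array of page
 * buffers and handed to netvsc_send() for transfer over VMBus.
 */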
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_netvsc_packet *packet = NULL;
        int ret;
        unsigned int num_data_pgs;
        struct rndis_message *rndis_msg;
        struct rndis_packet *rndis_pkt;
        u32 rndis_msg_size;
        struct rndis_per_packet_info *ppi;
        struct ndis_tcp_ip_checksum_info *csum_info;
        int hdr_offset;
        u32 net_trans_info;
        u32 hash;
        u32 skb_length;
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
        struct hv_page_buffer *pb = page_buf;

        /* We will at most need two pages to describe the rndis
         * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
         * of pages in a single packet. If skb is scattered around
         * more pages we try linearizing it.
         */

        skb_length = skb->len;
        num_data_pgs = netvsc_get_slots(skb) + 2;

        if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
                ++net_device_ctx->eth_stats.tx_scattered;

                if (skb_linearize(skb))
                        goto no_memory;

                num_data_pgs = netvsc_get_slots(skb) + 2;
                if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
                        ++net_device_ctx->eth_stats.tx_too_big;
                        goto drop;
                }
        }

        /*
         * Place the rndis header in the skb head room and
         * the skb->cb will be used for hv_netvsc_packet
         * structure.
         */
        ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
        if (ret)
                goto no_memory;

        /* Use the skb control buffer for building up the packet */
        BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
                     FIELD_SIZEOF(struct sk_buff, cb));
        packet = (struct hv_netvsc_packet *)skb->cb;

        packet->q_idx = skb_get_queue_mapping(skb);

        packet->total_data_buflen = skb->len;

        rndis_msg = (struct rndis_message *)skb->head;

        memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

        /* Add the rndis header */
        rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
        rndis_msg->msg_len = packet->total_data_buflen;
        rndis_pkt = &rndis_msg->msg.pkt;
        rndis_pkt->data_offset = sizeof(struct rndis_packet);
        rndis_pkt->data_len = packet->total_data_buflen;
        rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

        rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

        hash = skb_get_hash_raw(skb);
        if (hash != 0 && net->real_num_tx_queues > 1) {
                rndis_msg_size += NDIS_HASH_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
                                    NBL_HASH_VALUE);
                *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
        }

        if (skb_vlan_tag_present(skb)) {
                struct ndis_pkt_8021q_info *vlan;

                rndis_msg_size += NDIS_VLAN_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
                                    IEEE_8021Q_INFO);
                vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
                                                      ppi->ppi_offset);
                vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
                vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
                                VLAN_PRIO_SHIFT;
        }

        net_trans_info = get_net_transport_info(skb, &hdr_offset);
        if (net_trans_info == TRANSPORT_INFO_NOT_IP)
                goto do_send;

        /*
         * Setup the sendside checksum offload only if this is not a
         * GSO packet.
         */
        if (skb_is_gso(skb)) {
                struct ndis_tcp_lso_info *lso_info;

                rndis_msg_size += NDIS_LSO_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
                                    TCP_LARGESEND_PKTINFO);

                lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
                                                        ppi->ppi_offset);

                lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
                if (net_trans_info & (INFO_IPV4 << 16)) {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
                        ip_hdr(skb)->tot_len = 0;
                        ip_hdr(skb)->check = 0;
                        tcp_hdr(skb)->check =
                                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                        ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                } else {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                        &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                }
                lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
                lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
                goto do_send;
        }

        if ((skb->ip_summed == CHECKSUM_NONE) ||
            (skb->ip_summed == CHECKSUM_UNNECESSARY))
                goto do_send;

        rndis_msg_size += NDIS_CSUM_PPI_SIZE;
        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                            TCPIP_CHKSUM_PKTINFO);

        csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
                        ppi->ppi_offset);

        if (net_trans_info & (INFO_IPV4 << 16))
                csum_info->transmit.is_ipv4 = 1;
        else
                csum_info->transmit.is_ipv6 = 1;

        if (net_trans_info & INFO_TCP) {
                csum_info->transmit.tcp_checksum = 1;
                csum_info->transmit.tcp_header_offset = hdr_offset;
        } else if (net_trans_info & INFO_UDP) {
                /* UDP checksum offload is not supported on ws2008r2.
                 * Furthermore, on ws2012 and ws2012r2, there are some
                 * issues with udp checksum offload from Linux guests.
                 * (these are host issues).
                 * For now compute the checksum here.
                 */
                struct udphdr *uh;
                u16 udp_len;

                ret = skb_cow_head(skb, 0);
                if (ret)
                        goto no_memory;

                uh = udp_hdr(skb);
                udp_len = ntohs(uh->len);
                uh->check = 0;
                uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                              ip_hdr(skb)->daddr,
                                              udp_len, IPPROTO_UDP,
                                              csum_partial(uh, udp_len, 0));
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;

                csum_info->transmit.udp_checksum = 0;
        }

do_send:
        /* Start filling in the page buffers with the rndis hdr */
        rndis_msg->msg_len += rndis_msg_size;
        packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
                                               skb, packet, &pb);

        /* timestamp packet in software */
        skb_tx_timestamp(skb);
        ret = netvsc_send(net_device_ctx->device_ctx, packet,
                          rndis_msg, &pb, skb);
        if (likely(ret == 0)) {
                struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

                u64_stats_update_begin(&tx_stats->syncp);
                tx_stats->packets++;
                tx_stats->bytes += skb_length;
                u64_stats_update_end(&tx_stats->syncp);
                return NETDEV_TX_OK;
        }

        if (ret == -EAGAIN) {
                ++net_device_ctx->eth_stats.tx_busy;
                return NETDEV_TX_BUSY;
        }

        if (ret == -ENOSPC)
                ++net_device_ctx->eth_stats.tx_no_space;

drop:
        dev_kfree_skb_any(skb);
        net->stats.tx_dropped++;

        return NETDEV_TX_OK;

no_memory:
        ++net_device_ctx->eth_stats.tx_no_memory;
        goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                struct rndis_message *resp)
{
        struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_reconfig *event;
        unsigned long flags;

        net = hv_get_drvdata(device_obj);

        if (!net)
                return;

        ndev_ctx = netdev_priv(net);

        /* Update the physical link speed when changing to another vSwitch */
        if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
                u32 speed;

                speed = *(u32 *)((void *)indicate + indicate->
                                 status_buf_offset) / 10000;
                ndev_ctx->speed = speed;
                return;
        }

        /* Handle these link change statuses below */
        if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
            indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
            indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
                return;

        if (net->reg_state != NETREG_REGISTERED)
                return;

        event = kzalloc(sizeof(*event), GFP_ATOMIC);
        if (!event)
                return;
        event->event = indicate->status;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        list_add_tail(&event->list, &ndev_ctx->reconfig_events);
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        schedule_delayed_work(&ndev_ctx->dwork, 0);
}

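/* Build an skb for a received packet; the data lives in the host-shared
 * receive buffer, so it must be copied out before the callback returns.
 */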
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
                                struct hv_netvsc_packet *packet,
                                struct ndis_tcp_ip_checksum_info *csum_info,
                                void *data, u16 vlan_tci)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        if (!skb)
                return skb;

        /*
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated
         */
        memcpy(skb_put(skb, packet->total_data_buflen), data,
               packet->total_data_buflen);

        skb->protocol = eth_type_trans(skb, net);
        if (csum_info) {
                /* We only look at the IP checksum here.
                 * Should we be dropping the packet if checksum
                 * failed? How do we deal with other checksums - TCP/UDP?
                 */
                if (csum_info->receive.ip_checksum_succeeded)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;
        }

        if (vlan_tci & VLAN_TAG_PRESENT)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       vlan_tci);

        return skb;
}

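/* Receive path: with an SR-IOV VF bonded in and injection enabled, the
 * packet is delivered through the VF netdev so the stack sees a single
 * interface; otherwise it is copied into an skb and passed to netif_rx().
 */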
/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
                                struct hv_netvsc_packet *packet,
                                void **data,
                                struct ndis_tcp_ip_checksum_info *csum_info,
                                struct vmbus_channel *channel,
                                u16 vlan_tci)
{
        struct net_device *net = hv_get_drvdata(device_obj);
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct sk_buff *skb;
        struct sk_buff *vf_skb;
        struct netvsc_stats *rx_stats;
        u32 bytes_recvd = packet->total_data_buflen;
        int ret = 0;

        if (!net || net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;

        if (READ_ONCE(net_device_ctx->vf_inject)) {
                atomic_inc(&net_device_ctx->vf_use_cnt);
                if (!READ_ONCE(net_device_ctx->vf_inject)) {
                        /*
                         * We raced; just move on.
                         */
                        atomic_dec(&net_device_ctx->vf_use_cnt);
                        goto vf_injection_done;
                }

                /*
                 * Inject this packet into the VF interface.
                 * On Hyper-V, multicast and broadcast packets
                 * are only delivered on the synthetic interface
                 * (after subjecting these to policy filters on
                 * the host). Deliver these via the VF interface
                 * in the guest.
                 */
                vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
                                               packet, csum_info, *data,
                                               vlan_tci);
                if (vf_skb != NULL) {
                        ++net_device_ctx->vf_netdev->stats.rx_packets;
                        net_device_ctx->vf_netdev->stats.rx_bytes +=
                                bytes_recvd;
                        netif_receive_skb(vf_skb);
                } else {
                        ++net->stats.rx_dropped;
                        ret = NVSP_STAT_FAIL;
                }
                atomic_dec(&net_device_ctx->vf_use_cnt);
                return ret;
        }

vf_injection_done:
        rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
                return NVSP_STAT_FAIL;
        }
        skb_record_rx_queue(skb, channel->offermsg.offer.sub_channel_index);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->packets++;
        rx_stats->bytes += packet->total_data_buflen;
        u64_stats_update_end(&rx_stats->syncp);

        /*
         * Pass the skb back up. Network stack will deallocate the skb when it
         * is done.
         * TODO - use NAPI?
         */
        netif_rx(skb);

        return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}

static void netvsc_get_channels(struct net_device *net,
                                struct ethtool_channels *channel)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = net_device_ctx->nvdev;

        if (nvdev) {
                channel->max_combined = nvdev->max_chn;
                channel->combined_count = nvdev->num_chn;
        }
}

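/* Changing the channel count tears the RNDIS device down and re-adds it
 * with the new subchannel count; on failure one recovery pass retries
 * with the original count.
 */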
static int netvsc_set_channels(struct net_device *net,
                               struct ethtool_channels *channels)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev = net_device_ctx->nvdev;
        struct netvsc_device_info device_info;
        u32 num_chn;
        u32 max_chn;
        int ret = 0;
        bool recovering = false;

        if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
                return -ENODEV;

        num_chn = nvdev->num_chn;
        max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

        if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
                pr_info("vRSS unsupported before NVSP Version 5\n");
                return -EINVAL;
        }

        /* We do not support rx, tx, or other */
        if (!channels ||
            channels->rx_count ||
            channels->tx_count ||
            channels->other_count ||
            (channels->combined_count < 1))
                return -EINVAL;

        if (channels->combined_count > max_chn) {
                pr_info("combined channels too high, using %d\n", max_chn);
                channels->combined_count = max_chn;
        }

        ret = netvsc_close(net);
        if (ret)
                goto out;

 do_set:
        net_device_ctx->start_remove = true;
        rndis_filter_device_remove(dev);

        nvdev->num_chn = channels->combined_count;

        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
        device_info.ring_size = ring_size;
        device_info.max_num_vrss_chns = max_num_vrss_chns;

        ret = rndis_filter_device_add(dev, &device_info);
        if (ret) {
                if (recovering) {
                        netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                        return ret;
                }
                goto recover;
        }

        nvdev = net_device_ctx->nvdev;

        ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
        if (ret) {
                if (recovering) {
                        netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
                        return ret;
                }
                goto recover;
        }

        ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
        if (ret) {
                if (recovering) {
                        netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
                        return ret;
                }
                goto recover;
        }

 out:
        netvsc_open(net);
        net_device_ctx->start_remove = false;
        /* We may have missed link change notifications */
        schedule_delayed_work(&net_device_ctx->dwork, 0);

        return ret;

 recover:
        /* If the above failed, we attempt to recover through the same
         * process but with the original number of channels.
         */
        netdev_err(net, "could not set channels, recovering\n");
        recovering = true;
        channels->combined_count = num_chn;
        goto do_set;
}

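/* Accept a settings change only if every field other than speed, duplex
 * and the usually-set advertising/cmd fields matches the fixed values
 * this driver reports, i.e. port == PORT_OTHER and the rest zero.
 */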
static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
        struct ethtool_cmd diff1 = *cmd;
        struct ethtool_cmd diff2 = {};

        ethtool_cmd_speed_set(&diff1, 0);
        diff1.duplex = 0;
        /* advertising and cmd are usually set */
        diff1.advertising = 0;
        diff1.cmd = 0;
        /* We set port to PORT_OTHER */
        diff2.port = PORT_OTHER;

        return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
        struct net_device_context *ndc = netdev_priv(dev);

        ndc->speed = SPEED_UNKNOWN;
        ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);

        ethtool_cmd_speed_set(cmd, ndc->speed);
        cmd->duplex = ndc->duplex;
        cmd->port = PORT_OTHER;

        return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);
        u32 speed;

        speed = ethtool_cmd_speed(cmd);
        if (!ethtool_validate_speed(speed) ||
            !ethtool_validate_duplex(cmd->duplex) ||
            !netvsc_validate_ethtool_ss_cmd(cmd))
                return -EINVAL;

        ndc->speed = speed;
        ndc->duplex = cmd->duplex;

        return 0;
}

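/* An MTU change also recreates the RNDIS device. NVSP versions before 2
 * are limited to ETH_DATA_LEN; later versions allow up to NETVSC_MTU
 * minus the Ethernet header.
 */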
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct netvsc_device *nvdev = ndevctx->nvdev;
        struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device_info device_info;
        int limit = ETH_DATA_LEN;
        u32 num_chn;
        int ret = 0;

        if (ndevctx->start_remove || !nvdev || nvdev->destroy)
                return -ENODEV;

        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
                limit = NETVSC_MTU - ETH_HLEN;

        if (mtu < NETVSC_MTU_MIN || mtu > limit)
                return -EINVAL;

        ret = netvsc_close(ndev);
        if (ret)
                goto out;

        num_chn = nvdev->num_chn;

        ndevctx->start_remove = true;
        rndis_filter_device_remove(hdev);

        ndev->mtu = mtu;

        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.num_chn = num_chn;
        device_info.max_num_vrss_chns = max_num_vrss_chns;
        rndis_filter_device_add(hdev, &device_info);

out:
        netvsc_open(ndev);
        ndevctx->start_remove = false;

        /* We may have missed link change notifications */
        schedule_delayed_work(&ndevctx->dwork, 0);

        return ret;
}

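/* Fold the per-cpu transmit/receive counters into one snapshot; the
 * u64_stats seqcount loops make the 64-bit reads safe on 32-bit kernels.
 */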
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
                                                    struct rtnl_link_stats64 *t)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        int cpu;

        for_each_possible_cpu(cpu) {
                struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
                                                            cpu);
                struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
                                                            cpu);
                u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
                        tx_packets = tx_stats->packets;
                        tx_bytes = tx_stats->bytes;
                } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

                do {
                        start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
                        rx_packets = rx_stats->packets;
                        rx_bytes = rx_stats->bytes;
                } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

                t->tx_bytes += tx_bytes;
                t->tx_packets += tx_packets;
                t->rx_bytes += rx_bytes;
                t->rx_packets += rx_packets;
        }

        t->tx_dropped = net->stats.tx_dropped;
        t->tx_errors = net->stats.tx_dropped;

        t->rx_dropped = net->stats.rx_dropped;
        t->rx_errors = net->stats.rx_errors;

        return t;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;
        char save_adr[ETH_ALEN];
        unsigned char save_aatype;
        int err;

        memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
        save_aatype = ndev->addr_assign_type;

        err = eth_mac_addr(ndev, p);
        if (err != 0)
                return err;

        err = rndis_filter_set_device_mac(ndev, addr->sa_data);
        if (err != 0) {
                /* roll back to saved MAC */
                memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
                ndev->addr_assign_type = save_aatype;
        }

        return err;
}

static const struct {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} netvsc_stats[] = {
        { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
        { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
        { "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
        { "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
        { "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(netvsc_stats);
        default:
                return -EINVAL;
        }
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct net_device_context *ndc = netdev_priv(dev);
        const void *nds = &ndc->eth_stats;
        int i;

        for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
                data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               netvsc_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
        /* As netvsc_start_xmit() works synchronously we don't have to
         * trigger anything here.
         */
}
#endif

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = netvsc_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ethtool_stats = netvsc_get_ethtool_stats,
        .get_sset_count = netvsc_get_sset_count,
        .get_strings = netvsc_get_strings,
        .get_channels = netvsc_get_channels,
        .set_channels = netvsc_set_channels,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_settings = netvsc_get_settings,
        .set_settings = netvsc_set_settings,
};

static const struct net_device_ops device_ops = {
        .ndo_open = netvsc_open,
        .ndo_stop = netvsc_close,
        .ndo_start_xmit = netvsc_start_xmit,
        .ndo_set_rx_mode = netvsc_set_multicast_list,
        .ndo_change_mtu = netvsc_change_mtu,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_mac_address = netvsc_set_mac_addr,
        .ndo_select_queue = netvsc_select_queue,
        .ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
        struct net_device_context *ndev_ctx =
                container_of(w, struct net_device_context, dwork.work);
        struct hv_device *device_obj = ndev_ctx->device_ctx;
        struct net_device *net = hv_get_drvdata(device_obj);
        struct netvsc_device *net_device;
        struct rndis_device *rdev;
        struct netvsc_reconfig *event = NULL;
        bool notify = false, reschedule = false;
        unsigned long flags, next_reconfig, delay;

        rtnl_lock();
        if (ndev_ctx->start_remove)
                goto out_unlock;

        net_device = ndev_ctx->nvdev;
        rdev = net_device->extension;

        next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
        if (time_is_after_jiffies(next_reconfig)) {
                /* link_watch only sends one notification with current state
                 * per second, avoid doing reconfig more frequently. Handle
                 * wrap around.
                 */
                delay = next_reconfig - jiffies;
                delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
                schedule_delayed_work(&ndev_ctx->dwork, delay);
                goto out_unlock;
        }
        ndev_ctx->last_reconfig = jiffies;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        if (!list_empty(&ndev_ctx->reconfig_events)) {
                event = list_first_entry(&ndev_ctx->reconfig_events,
                                         struct netvsc_reconfig, list);
                list_del(&event->list);
                reschedule = !list_empty(&ndev_ctx->reconfig_events);
        }
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        if (!event)
                goto out_unlock;

        switch (event->event) {
                /* Only the following events are possible due to the check in
                 * netvsc_linkstatus_callback()
                 */
        case RNDIS_STATUS_MEDIA_CONNECT:
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
                        netif_tx_wake_all_queues(net);
                } else {
                        notify = true;
                }
                kfree(event);
                break;
        case RNDIS_STATUS_MEDIA_DISCONNECT:
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
                        netif_tx_stop_all_queues(net);
                }
                kfree(event);
                break;
        case RNDIS_STATUS_NETWORK_CHANGE:
                /* Only makes sense if carrier is present */
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
                        netif_tx_stop_all_queues(net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);
                        spin_unlock_irqrestore(&ndev_ctx->lock, flags);
                        reschedule = true;
                }
                break;
        }

        rtnl_unlock();

        if (notify)
                netdev_notify_peers(net);

        /* link_watch only sends one notification with current state per
         * second, handle next reconfig event in 2 seconds.
         */
        if (reschedule)
                schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

        return;

out_unlock:
        rtnl_unlock();
}

static void netvsc_free_netdev(struct net_device *netdev)
{
        struct net_device_context *net_device_ctx = netdev_priv(netdev);

        free_percpu(net_device_ctx->tx_stats);
        free_percpu(net_device_ctx->rx_stats);
        free_netdev(netdev);
}

static struct net_device *get_netvsc_net_device(char *mac)
{
        struct net_device *dev, *found = NULL;

        ASSERT_RTNL();

        for_each_netdev(&init_net, dev) {
                if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
                        if (dev->netdev_ops != &device_ops)
                                continue;
                        found = dev;
                        break;
                }
        }

        return found;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
        struct net_device *ndev;
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
        const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

        if (eth_ops == NULL || eth_ops == &ethtool_ops)
                return NOTIFY_DONE;

        /*
         * We will use the MAC address to locate the synthetic interface to
         * associate with the VF interface. If we don't find a matching
         * synthetic interface, move on.
         */
        ndev = get_netvsc_net_device(vf_netdev->dev_addr);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = net_device_ctx->nvdev;
        if (!netvsc_dev || net_device_ctx->vf_netdev)
                return NOTIFY_DONE;

        netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
        /*
         * Take a reference on the module.
         */
        try_module_get(THIS_MODULE);
        net_device_ctx->vf_netdev = vf_netdev;
        return NOTIFY_OK;
}

static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
{
        net_device_ctx->vf_inject = true;
}

static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
{
        net_device_ctx->vf_inject = false;

        /* Wait for currently active users to drain out. */
        while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
                udelay(50);
}

static int netvsc_vf_up(struct net_device *vf_netdev)
{
        struct net_device *ndev;
        struct netvsc_device *netvsc_dev;
        const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
        struct net_device_context *net_device_ctx;

        if (eth_ops == &ethtool_ops)
                return NOTIFY_DONE;

        ndev = get_netvsc_net_device(vf_netdev->dev_addr);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = net_device_ctx->nvdev;

        if (!netvsc_dev || !net_device_ctx->vf_netdev)
                return NOTIFY_DONE;

        netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
        netvsc_inject_enable(net_device_ctx);

        /*
         * Open the device before switching data path.
         */
        rndis_filter_open(netvsc_dev);

        /*
         * notify the host to switch the data path.
         */
        netvsc_switch_datapath(ndev, true);
        netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

        netif_carrier_off(ndev);

        /* Now notify peers through VF device. */
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

        return NOTIFY_OK;
}

84bf9cef
KS
1325static int netvsc_vf_down(struct net_device *vf_netdev)
1326{
0a1275ca 1327 struct net_device *ndev;
84bf9cef
KS
1328 struct netvsc_device *netvsc_dev;
1329 struct net_device_context *net_device_ctx;
1330 const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
1331
1332 if (eth_ops == &ethtool_ops)
1333 return NOTIFY_DONE;
1334
0a1275ca
VK
1335 ndev = get_netvsc_net_device(vf_netdev->dev_addr);
1336 if (!ndev)
1337 return NOTIFY_DONE;
1338
1339 net_device_ctx = netdev_priv(ndev);
1340 netvsc_dev = net_device_ctx->nvdev;
84bf9cef 1341
f9a7da91 1342 if (!netvsc_dev || !net_device_ctx->vf_netdev)
84bf9cef
KS
1343 return NOTIFY_DONE;
1344
0a1275ca 1345 netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
57c1826b 1346 netvsc_inject_disable(net_device_ctx);
0a1275ca
VK
1347 netvsc_switch_datapath(ndev, false);
1348 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
2f5fa6c8 1349 rndis_filter_close(netvsc_dev);
0a1275ca 1350 netif_carrier_on(ndev);
d072218f
VK
1351
1352 /* Now notify peers through netvsc device. */
1353 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
84bf9cef
KS
1354
1355 return NOTIFY_OK;
1356}
1357
84bf9cef
KS
1358static int netvsc_unregister_vf(struct net_device *vf_netdev)
1359{
0a1275ca 1360 struct net_device *ndev;
84bf9cef
KS
1361 struct netvsc_device *netvsc_dev;
1362 const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
0a1275ca 1363 struct net_device_context *net_device_ctx;
84bf9cef
KS
1364
1365 if (eth_ops == &ethtool_ops)
1366 return NOTIFY_DONE;
1367
0a1275ca
VK
1368 ndev = get_netvsc_net_device(vf_netdev->dev_addr);
1369 if (!ndev)
1370 return NOTIFY_DONE;
1371
1372 net_device_ctx = netdev_priv(ndev);
1373 netvsc_dev = net_device_ctx->nvdev;
0f20d795 1374 if (!netvsc_dev || !net_device_ctx->vf_netdev)
84bf9cef 1375 return NOTIFY_DONE;
0a1275ca 1376 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
57c1826b 1377 netvsc_inject_disable(net_device_ctx);
f9a7da91 1378 net_device_ctx->vf_netdev = NULL;
84bf9cef
KS
1379 module_put(THIS_MODULE);
1380 return NOTIFY_OK;
1381}
1382
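/* Probe: allocate a multiqueue net_device (one queue per online CPU),
 * add the RNDIS device (which creates the channels and reports the MAC
 * address) and only then register the netdev.
 */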
static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info device_info;
        struct netvsc_device *nvdev;
        int ret;

        net = alloc_etherdev_mq(sizeof(struct net_device_context),
                                num_online_cpus());
        if (!net)
                return -ENOMEM;

        netif_carrier_off(net);

        netvsc_init_settings(net);

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
        net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
        if (netif_msg_probe(net_device_ctx))
                netdev_dbg(net, "netvsc msg_enable: %d\n",
                           net_device_ctx->msg_enable);

        net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
        if (!net_device_ctx->tx_stats) {
                free_netdev(net);
                return -ENOMEM;
        }
        net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
        if (!net_device_ctx->rx_stats) {
                free_percpu(net_device_ctx->tx_stats);
                free_netdev(net);
                return -ENOMEM;
        }

        hv_set_drvdata(dev, net);

        net_device_ctx->start_remove = false;

        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);

        spin_lock_init(&net_device_ctx->lock);
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

        atomic_set(&net_device_ctx->vf_use_cnt, 0);
        net_device_ctx->vf_netdev = NULL;
        net_device_ctx->vf_inject = false;

        net->netdev_ops = &device_ops;

        net->hw_features = NETVSC_HW_FEATURES;
        net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

        net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);

        /* We always need headroom for rndis header */
        net->needed_headroom = RNDIS_AND_PPI_SIZE;

        /* Notify the netvsc driver of the new device */
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.max_num_vrss_chns = max_num_vrss_chns;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                netvsc_free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
        }
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

        nvdev = net_device_ctx->nvdev;
        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);

        ret = register_netdev(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
                rndis_filter_device_remove(dev);
                netvsc_free_netdev(net);
        }

        return ret;
}

415b023a 1472static int netvsc_remove(struct hv_device *dev)
df2fff28 1473{
2ddd5e5f 1474 struct net_device *net;
122a5f64 1475 struct net_device_context *ndev_ctx;
2ddd5e5f
S
1476 struct netvsc_device *net_device;
1477
3d541ac5 1478 net = hv_get_drvdata(dev);
df2fff28 1479
df2fff28 1480 if (net == NULL) {
415b023a 1481 dev_err(&dev->device, "No net device to remove\n");
df2fff28
GKH
1482 return 0;
1483 }
1484
122a5f64 1485 ndev_ctx = netdev_priv(net);
3d541ac5
VK
1486 net_device = ndev_ctx->nvdev;
1487
6da7225f
VK
1488 /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
1489 * removing the device.
1490 */
1491 rtnl_lock();
f580aec4 1492 ndev_ctx->start_remove = true;
6da7225f 1493 rtnl_unlock();
f580aec4 1494
122a5f64 1495 cancel_delayed_work_sync(&ndev_ctx->dwork);
792df872 1496 cancel_work_sync(&ndev_ctx->work);
122a5f64 1497
df2fff28 1498 /* Stop outbound asap */
0a282538 1499 netif_tx_disable(net);
df2fff28
GKH
1500
1501 unregister_netdev(net);
1502
1503 /*
1504 * Call to the vsc driver to let it know that the device is being
1505 * removed
1506 */
df06bcff 1507 rndis_filter_device_remove(dev);
df2fff28 1508
3d541ac5
VK
1509 hv_set_drvdata(dev, NULL);
1510
7eafd9b4 1511 netvsc_free_netdev(net);
df06bcff 1512 return 0;
df2fff28
GKH
1513}
1514
345c4cc3 1515static const struct hv_vmbus_device_id id_table[] = {
c45cf2d4 1516 /* Network guid */
8f505944 1517 { HV_NIC_GUID, },
c45cf2d4 1518 { },
345c4cc3
S
1519};
1520
1521MODULE_DEVICE_TABLE(vmbus, id_table);
1522
/* The one and only one */
static struct hv_driver netvsc_drv = {
        .name = KBUILD_MODNAME,
        .id_table = id_table,
        .probe = netvsc_probe,
        .remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
{
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

        /* Avoid Vlan dev with same MAC registering as VF */
        if (event_dev->priv_flags & IFF_802_1Q_VLAN)
                return NOTIFY_DONE;

        /* Avoid Bonding master dev with same MAC registering as VF */
        if (event_dev->priv_flags & IFF_BONDING &&
            event_dev->flags & IFF_MASTER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
                return netvsc_register_vf(event_dev);
        case NETDEV_UNREGISTER:
                return netvsc_unregister_vf(event_dev);
        case NETDEV_UP:
                return netvsc_vf_up(event_dev);
        case NETDEV_DOWN:
                return netvsc_vf_down(event_dev);
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block netvsc_netdev_notifier = {
        .notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
        unregister_netdevice_notifier(&netvsc_netdev_notifier);
        vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
        int ret;

        if (ring_size < RING_SIZE_MIN) {
                ring_size = RING_SIZE_MIN;
                pr_info("Increased ring_size to %d (min allowed)\n",
                        ring_size);
        }
        ret = vmbus_driver_register(&netvsc_drv);

        if (ret)
                return ret;

        register_netdevice_notifier(&netvsc_netdev_notifier);
        return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);