drivers/net/ethernet/mellanox/mlx4/en_netdev.c
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

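/* Configure the number of traffic classes (user priorities) exposed by the
 * netdev. Only up == 0 (no TCs) or up == MLX4_EN_NUM_UP is accepted; each
 * UP is then mapped to a contiguous block of num_tx_rings_p_up TX queues.
 */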
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly among UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			      struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx4_en_setup_tc(dev, tc->tc);
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

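/* Attach (or re-attach, when a flow is requeued to a new RX ring) the
 * hardware steering rule for one RFS filter: an ETH + IPv4 + TCP/UDP spec
 * list that exactly matches the flow 4-tuple and steers it to the QP of
 * the target RX ring. Runs from the driver workqueue.
 */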
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

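/* Hash the flow 4-tuple (addresses and ports) into one of the driver's
 * filter_hash buckets.
 */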
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

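/* RFS flow-steering callback (wired up as ndo_rx_flow_steer in the driver's
 * netdev_ops; the hookup itself is outside this excerpt). Returns the driver
 * filter id on success, or a negative errno; non-IPv4, fragmented, and
 * non-TCP/UDP flows get -EPROTONOSUPPORT. The hardware attach is deferred
 * to the workqueue via mlx4_en_filter_work().
 */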
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

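/* Expire up to MLX4_EN_FILTER_EXPIRY_QUOTA filters per invocation, asking
 * the RFS core (rps_may_expire_flow) which flows may be reclaimed. The
 * filter list is rotated past the last surviving filter so subsequent
 * calls continue the scan from there.
 */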
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

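/* Add a VXLAN steering rule for @addr unless tunnel offload is not in
 * VXLAN mode or the device uses static DMFS A0 steering, in which case
 * this is a no-op.
 */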
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

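/* Attach a unicast MAC steering rule to QP *qpn. In B0 steering the MAC is
 * encoded into a GID and attached directly; in device-managed steering an
 * ETH spec flow rule is built and its handle is returned through *reg_id.
 */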
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

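/* Register the port MAC and obtain the base QP number. In A0 steering the
 * QP is derived from the MAC table index; otherwise a single QP is
 * reserved for the MAC registration.
 */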
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst:
	 * these are the entries that are not found in src.
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst,
	 * marking them as needing to be added.
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

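/* ndo_set_rx_mode callback. Reconfiguring the filters issues firmware
 * commands that can sleep, while this callback may run in atomic context,
 * so the actual work is deferred to the driver workqueue.
 */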
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast tunnel steering rule\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast tunnel steering rule\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		napi_schedule(&cq->napi);
	}
}
#endif

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

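/* Adaptive RX coalescing: once per sample interval, estimate the per-ring
 * packet rate and linearly interpolate the CQ moderation time between
 * rx_usecs_low and rx_usecs_high as the rate moves from pkt_rate_low to
 * pkt_rate_high. Rings below the rate threshold, or carrying mostly small
 * packets, fall back to the low moderation time.
 */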
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

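/* Bring the port up: activate RX rings and CQs, obtain the base QP,
 * configure RSS and the drop QP, activate TX CQs and rings, apply the
 * port and VXLAN configuration, run INIT_PORT, install steering rules and
 * attach the broadcast address. On failure, everything done so far is
 * unwound in reverse order via the error labels.
 */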
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

#ifdef CONFIG_MLX4_EN_VXLAN
	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		vxlan_get_rx_port(dev);
#endif
	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

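/* Tear the port down, roughly reversing mlx4_en_start_port(): close the
 * port and stop TX, leave any promiscuous modes, detach multicast and
 * ethtool steering rules, then free the drop QP, TX rings, RSS steering
 * rules and QPs, release the base QP and deactivate the RX rings.
 */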
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

1850 static void mlx4_en_restart(struct work_struct *work)
1851 {
1852 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1853 watchdog_task);
1854 struct mlx4_en_dev *mdev = priv->mdev;
1855 struct net_device *dev = priv->dev;
1856
1857 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1858
1859 rtnl_lock();
1860 mutex_lock(&mdev->state_lock);
1861 if (priv->port_up) {
1862 mlx4_en_stop_port(dev, 1);
1863 if (mlx4_en_start_port(dev))
1864 en_err(priv, "Failed restarting port %d\n", priv->port);
1865 }
1866 mutex_unlock(&mdev->state_lock);
1867 rtnl_unlock();
1868 }
1869
1870 static void mlx4_en_clear_stats(struct net_device *dev)
1871 {
1872 struct mlx4_en_priv *priv = netdev_priv(dev);
1873 struct mlx4_en_dev *mdev = priv->mdev;
1874 int i;
1875
1876 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1877 en_dbg(HW, priv, "Failed dumping statistics\n");
1878
1879 memset(&priv->stats, 0, sizeof(priv->stats));
1880 memset(&priv->pstats, 0, sizeof(priv->pstats));
1881 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1882 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1883 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
1884 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
1885 memset(&priv->rx_priority_flowstats, 0,
1886 sizeof(priv->rx_priority_flowstats));
1887 memset(&priv->tx_priority_flowstats, 0,
1888 sizeof(priv->tx_priority_flowstats));
1889 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
1890
1891 for (i = 0; i < priv->tx_ring_num; i++) {
1892 priv->tx_ring[i]->bytes = 0;
1893 priv->tx_ring[i]->packets = 0;
1894 priv->tx_ring[i]->tx_csum = 0;
1895 }
1896 for (i = 0; i < priv->rx_ring_num; i++) {
1897 priv->rx_ring[i]->bytes = 0;
1898 priv->rx_ring[i]->packets = 0;
1899 priv->rx_ring[i]->csum_ok = 0;
1900 priv->rx_ring[i]->csum_none = 0;
1901 priv->rx_ring[i]->csum_complete = 0;
1902 }
1903 }
1904
1905 static int mlx4_en_open(struct net_device *dev)
1906 {
1907 struct mlx4_en_priv *priv = netdev_priv(dev);
1908 struct mlx4_en_dev *mdev = priv->mdev;
1909 int err = 0;
1910
1911 mutex_lock(&mdev->state_lock);
1912
1913 if (!mdev->device_up) {
1914 en_err(priv, "Cannot open - device down/disabled\n");
1915 err = -EBUSY;
1916 goto out;
1917 }
1918
1919 /* Reset HW statistics and SW counters */
1920 mlx4_en_clear_stats(dev);
1921
1922 err = mlx4_en_start_port(dev);
1923 if (err)
1924 en_err(priv, "Failed starting port:%d\n", priv->port);
1925
1926 out:
1927 mutex_unlock(&mdev->state_lock);
1928 return err;
1929 }
1930
1931
1932 static int mlx4_en_close(struct net_device *dev)
1933 {
1934 struct mlx4_en_priv *priv = netdev_priv(dev);
1935 struct mlx4_en_dev *mdev = priv->mdev;
1936
1937 en_dbg(IFDOWN, priv, "Close port called\n");
1938
1939 mutex_lock(&mdev->state_lock);
1940
1941 mlx4_en_stop_port(dev, 0);
1942 netif_carrier_off(dev);
1943
1944 mutex_unlock(&mdev->state_lock);
1945 return 0;
1946 }
1947
1948 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1949 {
1950 int i;
1951
1952 #ifdef CONFIG_RFS_ACCEL
1953 priv->dev->rx_cpu_rmap = NULL;
1954 #endif
1955
1956 for (i = 0; i < priv->tx_ring_num; i++) {
1957 if (priv->tx_ring && priv->tx_ring[i])
1958 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1959 if (priv->tx_cq && priv->tx_cq[i])
1960 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1961 }
1962
1963 for (i = 0; i < priv->rx_ring_num; i++) {
1964 if (priv->rx_ring[i])
1965 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1966 priv->prof->rx_ring_size, priv->stride);
1967 if (priv->rx_cq[i])
1968 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1969 }
1970
1971 }
1972
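/* Ring/CQ allocation is NUMA-aware: ring i is placed on the memory node
 * of CPU (i % num_online_cpus()), spreading ring memory round-robin
 * across the online CPUs. On failure, the error path below tears down
 * everything allocated so far; the NULL checks make it safe for
 * partially built ring arrays.
 */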
1973 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1974 {
1975 struct mlx4_en_port_profile *prof = priv->prof;
1976 int i;
1977 int node;
1978
1979 /* Create tx Rings */
1980 for (i = 0; i < priv->tx_ring_num; i++) {
1981 node = cpu_to_node(i % num_online_cpus());
1982 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1983 prof->tx_ring_size, i, TX, node))
1984 goto err;
1985
1986 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
1987 prof->tx_ring_size, TXBB_SIZE,
1988 node, i))
1989 goto err;
1990 }
1991
1992 /* Create rx Rings */
1993 for (i = 0; i < priv->rx_ring_num; i++) {
1994 node = cpu_to_node(i % num_online_cpus());
1995 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
1996 prof->rx_ring_size, i, RX, node))
1997 goto err;
1998
1999 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
2000 prof->rx_ring_size, priv->stride,
2001 node))
2002 goto err;
2003 }
2004
2005 #ifdef CONFIG_RFS_ACCEL
2006 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
2007 #endif
2008
2009 return 0;
2010
2011 err:
2012 en_err(priv, "Failed to allocate NIC resources\n");
2013 for (i = 0; i < priv->rx_ring_num; i++) {
2014 if (priv->rx_ring[i])
2015 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2016 prof->rx_ring_size,
2017 priv->stride);
2018 if (priv->rx_cq[i])
2019 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2020 }
2021 for (i = 0; i < priv->tx_ring_num; i++) {
2022 if (priv->tx_ring[i])
2023 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
2024 if (priv->tx_cq[i])
2025 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2026 }
2027 return -ENOMEM;
2028 }
2029
2030
2031 void mlx4_en_destroy_netdev(struct net_device *dev)
2032 {
2033 struct mlx4_en_priv *priv = netdev_priv(dev);
2034 struct mlx4_en_dev *mdev = priv->mdev;
2035
2036 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2037
2038 /* Unregister device - this will close the port if it was up */
2039 if (priv->registered) {
2040 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2041 priv->port));
2042 unregister_netdev(dev);
2043 }
2044
2045 if (priv->allocated)
2046 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2047
2048 cancel_delayed_work(&priv->stats_task);
2049 cancel_delayed_work(&priv->service_task);
2050 /* flush any pending task for this netdev */
2051 flush_workqueue(mdev->workqueue);
2052
2053 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2054 mlx4_en_remove_timestamp(mdev);
2055
2056 /* Detach the netdev so that tasks won't attempt to access it */
2057 mutex_lock(&mdev->state_lock);
2058 mdev->pndev[priv->port] = NULL;
2059 mdev->upper[priv->port] = NULL;
2060 mutex_unlock(&mdev->state_lock);
2061
2062 mlx4_en_free_resources(priv);
2063
2064 kfree(priv->tx_ring);
2065 kfree(priv->tx_cq);
2066
2067 free_netdev(dev);
2068 }
2069
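/* An MTU change on a running interface stops and restarts the port so
 * the hardware and RX buffers can be reconfigured for the new frame
 * size. The function deliberately returns 0 even if the restart fails:
 * the watchdog task is queued instead and is expected to recover the
 * port. A typical invocation from userspace (interface name assumed):
 *
 *	ip link set dev eth0 mtu 9000
 */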
2070 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2071 {
2072 struct mlx4_en_priv *priv = netdev_priv(dev);
2073 struct mlx4_en_dev *mdev = priv->mdev;
2074 int err = 0;
2075
2076 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
2077 dev->mtu, new_mtu);
2078
2079 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
2080 en_err(priv, "Bad MTU size:%d.\n", new_mtu);
2081 return -EPERM;
2082 }
2083 dev->mtu = new_mtu;
2084
2085 if (netif_running(dev)) {
2086 mutex_lock(&mdev->state_lock);
2087 if (!mdev->device_up) {
2088 /* NIC is probably restarting - let watchdog task reset
2089 * the port */
2090 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
2091 } else {
2092 mlx4_en_stop_port(dev, 1);
2093 err = mlx4_en_start_port(dev);
2094 if (err) {
2095 en_err(priv, "Failed restarting port:%d\n",
2096 priv->port);
2097 queue_work(mdev->workqueue, &priv->watchdog_task);
2098 }
2099 }
2100 mutex_unlock(&mdev->state_lock);
2101 }
2102 return 0;
2103 }
2104
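/* mlx4_en_hwtstamp_set() implements the SIOCSHWTSTAMP ioctl described
 * in Documentation/networking/timestamping.txt. A minimal userspace
 * sketch (hypothetical interface name, error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {0};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that the driver coerces every supported PTP rx_filter to
 * HWTSTAMP_FILTER_ALL, as the switch statement below shows.
 */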
2105 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2106 {
2107 struct mlx4_en_priv *priv = netdev_priv(dev);
2108 struct mlx4_en_dev *mdev = priv->mdev;
2109 struct hwtstamp_config config;
2110
2111 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2112 return -EFAULT;
2113
2114 /* reserved for future extensions */
2115 if (config.flags)
2116 return -EINVAL;
2117
2118 /* device doesn't support time stamping */
2119 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2120 return -EINVAL;
2121
2122 /* TX HW timestamp */
2123 switch (config.tx_type) {
2124 case HWTSTAMP_TX_OFF:
2125 case HWTSTAMP_TX_ON:
2126 break;
2127 default:
2128 return -ERANGE;
2129 }
2130
2131 /* RX HW timestamp */
2132 switch (config.rx_filter) {
2133 case HWTSTAMP_FILTER_NONE:
2134 break;
2135 case HWTSTAMP_FILTER_ALL:
2136 case HWTSTAMP_FILTER_SOME:
2137 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2138 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2139 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2140 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2141 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2142 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2143 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2144 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2145 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2146 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2147 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2148 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2149 config.rx_filter = HWTSTAMP_FILTER_ALL;
2150 break;
2151 default:
2152 return -ERANGE;
2153 }
2154
2155 if (mlx4_en_reset_config(dev, config, dev->features)) {
2156 config.tx_type = HWTSTAMP_TX_OFF;
2157 config.rx_filter = HWTSTAMP_FILTER_NONE;
2158 }
2159
2160 return copy_to_user(ifr->ifr_data, &config,
2161 sizeof(config)) ? -EFAULT : 0;
2162 }
2163
2164 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2165 {
2166 struct mlx4_en_priv *priv = netdev_priv(dev);
2167
2168 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2169 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2170 }
2171
2172 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2173 {
2174 switch (cmd) {
2175 case SIOCSHWTSTAMP:
2176 return mlx4_en_hwtstamp_set(dev, ifr);
2177 case SIOCGHWTSTAMP:
2178 return mlx4_en_hwtstamp_get(dev, ifr);
2179 default:
2180 return -EOPNOTSUPP;
2181 }
2182 }
2183
2184 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2185 netdev_features_t features)
2186 {
2187 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2188 struct mlx4_en_dev *mdev = en_priv->mdev;
2189
2190 /* Since there is no support for separate RX C-TAG/S-TAG VLAN accel
2191 * enable/disable, make sure the S-TAG flag is always in the same
2192 * state as the C-TAG flag.
2193 */
2194 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2195 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2196 features |= NETIF_F_HW_VLAN_STAG_RX;
2197 else
2198 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2199
2200 return features;
2201 }
2202
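/* ndo_set_features handler. The feature bits handled here map to the
 * following ethtool -K names (interface name assumed):
 *
 *	NETIF_F_RXFCS            ethtool -K eth0 rx-fcs on
 *	NETIF_F_RXALL            ethtool -K eth0 rx-all on
 *	NETIF_F_HW_VLAN_CTAG_RX  ethtool -K eth0 rxvlan off
 *	NETIF_F_LOOPBACK         ethtool -K eth0 loopback on
 *
 * RX-FCS and RX VLAN stripping require a full port reconfiguration via
 * mlx4_en_reset_config(); the other toggles are applied directly.
 */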
2203 static int mlx4_en_set_features(struct net_device *netdev,
2204 netdev_features_t features)
2205 {
2206 struct mlx4_en_priv *priv = netdev_priv(netdev);
2207 bool reset = false;
2208 int ret = 0;
2209
2210 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2211 en_info(priv, "Turn %s RX-FCS\n",
2212 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2213 reset = true;
2214 }
2215
2216 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2217 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2218
2219 en_info(priv, "Turn %s RX-ALL\n",
2220 ignore_fcs_value ? "ON" : "OFF");
2221 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2222 priv->port, ignore_fcs_value);
2223 if (ret)
2224 return ret;
2225 }
2226
2227 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2228 en_info(priv, "Turn %s RX vlan strip offload\n",
2229 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2230 reset = true;
2231 }
2232
2233 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2234 en_info(priv, "Turn %s TX vlan strip offload\n",
2235 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2236
2237 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2238 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2239 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2240
2241 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2242 en_info(priv, "Turn %s loopback\n",
2243 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2244 mlx4_en_update_loopback_state(netdev, features);
2245 }
2246
2247 if (reset) {
2248 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2249 features);
2250 if (ret)
2251 return ret;
2252 }
2253
2254 return 0;
2255 }
2256
2257 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2258 {
2259 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2260 struct mlx4_en_dev *mdev = en_priv->mdev;
2261 u64 mac_u64 = mlx4_mac_to_u64(mac);
2262
2263 if (is_multicast_ether_addr(mac))
2264 return -EINVAL;
2265
2266 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
2267 }
2268
2269 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2270 {
2271 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2272 struct mlx4_en_dev *mdev = en_priv->mdev;
2273
2274 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
2275 }
2276
2277 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2278 int max_tx_rate)
2279 {
2280 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2281 struct mlx4_en_dev *mdev = en_priv->mdev;
2282
2283 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2284 max_tx_rate);
2285 }
2286
2287 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2288 {
2289 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2290 struct mlx4_en_dev *mdev = en_priv->mdev;
2291
2292 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2293 }
2294
2295 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2296 {
2297 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2298 struct mlx4_en_dev *mdev = en_priv->mdev;
2299
2300 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2301 }
2302
2303 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2304 {
2305 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2306 struct mlx4_en_dev *mdev = en_priv->mdev;
2307
2308 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2309 }
2310
2311 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2312 struct ifla_vf_stats *vf_stats)
2313 {
2314 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2315 struct mlx4_en_dev *mdev = en_priv->mdev;
2316
2317 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2318 }
2319
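/* Serialize the firmware-provided 64-bit physical port id into ppid->id
 * most-significant byte first. Userspace can read it back with, e.g.,
 * "ip -d link show", which reports it as phys_port_id.
 */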
2320 #define PORT_ID_BYTE_LEN 8
2321 static int mlx4_en_get_phys_port_id(struct net_device *dev,
2322 struct netdev_phys_item_id *ppid)
2323 {
2324 struct mlx4_en_priv *priv = netdev_priv(dev);
2325 struct mlx4_dev *mdev = priv->mdev->dev;
2326 int i;
2327 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2328
2329 if (!phys_port_id)
2330 return -EOPNOTSUPP;
2331
2332 ppid->id_len = sizeof(phys_port_id);
2333 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2334 ppid->id[i] = phys_port_id & 0xff;
2335 phys_port_id >>= 8;
2336 }
2337 return 0;
2338 }
2339
2340 #ifdef CONFIG_MLX4_EN_VXLAN
2341 static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2342 {
2343 int ret;
2344 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2345 vxlan_add_task);
2346
2347 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2348 if (ret)
2349 goto out;
2350
2351 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2352 VXLAN_STEER_BY_OUTER_MAC, 1);
2353 out:
2354 if (ret) {
2355 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2356 return;
2357 }
2358
2359 /* set offloads */
2360 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2361 NETIF_F_RXCSUM |
2362 NETIF_F_TSO | NETIF_F_TSO6 |
2363 NETIF_F_GSO_UDP_TUNNEL |
2364 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2365 NETIF_F_GSO_PARTIAL;
2366 }
2367
2368 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2369 {
2370 int ret;
2371 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2372 vxlan_del_task);
2373 /* unset offloads */
2374 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2375 NETIF_F_RXCSUM |
2376 NETIF_F_TSO | NETIF_F_TSO6 |
2377 NETIF_F_GSO_UDP_TUNNEL |
2378 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2379 NETIF_F_GSO_PARTIAL);
2380
2381 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2382 VXLAN_STEER_BY_OUTER_MAC, 0);
2383 if (ret)
2384 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2385
2386 priv->vxlan_port = 0;
2387 }
2388
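/* VXLAN offload callbacks, invoked by the VXLAN core when a UDP port is
 * added to or removed from an underlay socket. Only a single offloaded
 * UDP port is supported at a time, and only IPv4 underlays are handled.
 * The firmware configuration is deferred to the mdev workqueue,
 * presumably because the required mlx4 commands may sleep while this
 * callback context may not.
 */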
2389 static void mlx4_en_add_vxlan_port(struct net_device *dev,
2390 sa_family_t sa_family, __be16 port)
2391 {
2392 struct mlx4_en_priv *priv = netdev_priv(dev);
2393 __be16 current_port;
2394
2395 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2396 return;
2397
2398 if (sa_family == AF_INET6)
2399 return;
2400
2401 current_port = priv->vxlan_port;
2402 if (current_port && current_port != port) {
2403 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2404 ntohs(current_port), ntohs(port));
2405 return;
2406 }
2407
2408 priv->vxlan_port = port;
2409 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2410 }
2411
2412 static void mlx4_en_del_vxlan_port(struct net_device *dev,
2413 sa_family_t sa_family, __be16 port)
2414 {
2415 struct mlx4_en_priv *priv = netdev_priv(dev);
2416 __be16 current_port;
2417
2418 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2419 return;
2420
2421 if (sa_family == AF_INET6)
2422 return;
2423
2424 current_port = priv->vxlan_port;
2425 if (current_port != port) {
2426 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2427 return;
2428 }
2429
2430 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2431 }
2432
2433 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2434 struct net_device *dev,
2435 netdev_features_t features)
2436 {
2437 features = vlan_features_check(skb, features);
2438 features = vxlan_features_check(skb, features);
2439
2440 /* The ConnectX-3 doesn't support outer IPv6 checksums, but it does
2441 * support inner IPv6 checksums and segmentation, so we need to
2442 * strip those features if this is an IPv6 encapsulated frame.
2443 */
2444 if (skb->encapsulation &&
2445 (skb->ip_summed == CHECKSUM_PARTIAL) &&
2446 (ip_hdr(skb)->version != 4))
2447 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2448
2449 return features;
2450 }
2451 #endif
2452
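/* ndo_set_tx_maxrate handler, reached through the per-queue sysfs file;
 * a hypothetical example capping tx queue 0 to 500 Mb/s:
 *
 *	echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * The value arrives in Mb/s; rates that do not fit the 12-bit rate
 * field are programmed in Gb/s units instead, so any sub-Gb/s remainder
 * is dropped by the integer division below.
 */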
2453 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
2454 {
2455 struct mlx4_en_priv *priv = netdev_priv(dev);
2456 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
2457 struct mlx4_update_qp_params params;
2458 int err;
2459
2460 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2461 return -EOPNOTSUPP;
2462
2463 /* The rate arrives in Mb/s; if it doesn't fit into 12 bits, program it in Gb/s */
2464 if (maxrate >> 12) {
2465 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2466 params.rate_val = maxrate / 1000;
2467 } else if (maxrate) {
2468 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2469 params.rate_val = maxrate;
2470 } else { /* zero serves to revoke the QP rate-limitation */
2471 params.rate_unit = 0;
2472 params.rate_val = 0;
2473 }
2474
2475 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2476 &params);
2477 return err;
2478 }
2479
2480 static const struct net_device_ops mlx4_netdev_ops = {
2481 .ndo_open = mlx4_en_open,
2482 .ndo_stop = mlx4_en_close,
2483 .ndo_start_xmit = mlx4_en_xmit,
2484 .ndo_select_queue = mlx4_en_select_queue,
2485 .ndo_get_stats = mlx4_en_get_stats,
2486 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2487 .ndo_set_mac_address = mlx4_en_set_mac,
2488 .ndo_validate_addr = eth_validate_addr,
2489 .ndo_change_mtu = mlx4_en_change_mtu,
2490 .ndo_do_ioctl = mlx4_en_ioctl,
2491 .ndo_tx_timeout = mlx4_en_tx_timeout,
2492 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2493 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2494 #ifdef CONFIG_NET_POLL_CONTROLLER
2495 .ndo_poll_controller = mlx4_en_netpoll,
2496 #endif
2497 .ndo_set_features = mlx4_en_set_features,
2498 .ndo_fix_features = mlx4_en_fix_features,
2499 .ndo_setup_tc = __mlx4_en_setup_tc,
2500 #ifdef CONFIG_RFS_ACCEL
2501 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2502 #endif
2503 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2504 #ifdef CONFIG_MLX4_EN_VXLAN
2505 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2506 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2507 .ndo_features_check = mlx4_en_features_check,
2508 #endif
2509 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2510 };
2511
2512 static const struct net_device_ops mlx4_netdev_ops_master = {
2513 .ndo_open = mlx4_en_open,
2514 .ndo_stop = mlx4_en_close,
2515 .ndo_start_xmit = mlx4_en_xmit,
2516 .ndo_select_queue = mlx4_en_select_queue,
2517 .ndo_get_stats = mlx4_en_get_stats,
2518 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2519 .ndo_set_mac_address = mlx4_en_set_mac,
2520 .ndo_validate_addr = eth_validate_addr,
2521 .ndo_change_mtu = mlx4_en_change_mtu,
2522 .ndo_tx_timeout = mlx4_en_tx_timeout,
2523 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2524 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2525 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2526 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2527 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
2528 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2529 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2530 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2531 .ndo_get_vf_config = mlx4_en_get_vf_config,
2532 #ifdef CONFIG_NET_POLL_CONTROLLER
2533 .ndo_poll_controller = mlx4_en_netpoll,
2534 #endif
2535 .ndo_set_features = mlx4_en_set_features,
2536 .ndo_fix_features = mlx4_en_fix_features,
2537 .ndo_setup_tc = __mlx4_en_setup_tc,
2538 #ifdef CONFIG_RFS_ACCEL
2539 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2540 #endif
2541 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2542 #ifdef CONFIG_MLX4_EN_VXLAN
2543 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2544 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2545 .ndo_features_check = mlx4_en_features_check,
2546 #endif
2547 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2548 };
2549
2550 struct mlx4_en_bond {
2551 struct work_struct work;
2552 struct mlx4_en_priv *priv;
2553 int is_bonded;
2554 struct mlx4_port_map port_map;
2555 };
2556
2557 static void mlx4_en_bond_work(struct work_struct *work)
2558 {
2559 struct mlx4_en_bond *bond = container_of(work,
2560 struct mlx4_en_bond,
2561 work);
2562 int err = 0;
2563 struct mlx4_dev *dev = bond->priv->mdev->dev;
2564
2565 if (bond->is_bonded) {
2566 if (!mlx4_is_bonded(dev)) {
2567 err = mlx4_bond(dev);
2568 if (err)
2569 en_err(bond->priv, "Failed to bond device\n");
2570 }
2571 if (!err) {
2572 err = mlx4_port_map_set(dev, &bond->port_map);
2573 if (err)
2574 en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2575 bond->port_map.port1,
2576 bond->port_map.port2,
2577 err);
2578 }
2579 } else if (mlx4_is_bonded(dev)) {
2580 err = mlx4_unbond(dev);
2581 if (err)
2582 en_err(bond->priv, "Failed to unbond device\n");
2583 }
2584 dev_put(bond->priv->dev);
2585 kfree(bond);
2586 }
2587
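/* The work item is allocated with GFP_ATOMIC, presumably to be safe in
 * any notifier calling context; dev_hold() pins the netdev until
 * mlx4_en_bond_work() drops the reference with dev_put() once the
 * (un)bond and port-map update have completed.
 */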
2588 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2589 u8 v2p_p1, u8 v2p_p2)
2590 {
2591 struct mlx4_en_bond *bond = NULL;
2592
2593 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2594 if (!bond)
2595 return -ENOMEM;
2596
2597 INIT_WORK(&bond->work, mlx4_en_bond_work);
2598 bond->priv = priv;
2599 bond->is_bonded = is_bonded;
2600 bond->port_map.port1 = v2p_p1;
2601 bond->port_map.port2 = v2p_p2;
2602 dev_hold(priv->dev);
2603 queue_work(priv->mdev->workqueue, &bond->work);
2604 return 0;
2605 }
2606
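/* Bonding notifier. The HCA is switched into bonded mode only when the
 * netdevs on both ETH ports of the same mlx4 device are enslaved to one
 * bonding master, in mode 1, 2 or 4, with exactly two slaves. Summary
 * of the virtual-to-physical (v2p) port maps programmed below:
 *
 *	active-backup:	both virtual ports follow the active slave,
 *			i.e. v2p = {1,1} or {2,2}
 *	XOR/802.3ad:	each virtual port maps to its native physical
 *			port while that port's link is up, and is
 *			remapped to the other port on link failure
 */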
2607 int mlx4_en_netdev_event(struct notifier_block *this,
2608 unsigned long event, void *ptr)
2609 {
2610 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2611 u8 port = 0;
2612 struct mlx4_en_dev *mdev;
2613 struct mlx4_dev *dev;
2614 int i, num_eth_ports = 0;
2615 bool do_bond = true;
2616 struct mlx4_en_priv *priv;
2617 u8 v2p_port1 = 0;
2618 u8 v2p_port2 = 0;
2619
2620 if (!net_eq(dev_net(ndev), &init_net))
2621 return NOTIFY_DONE;
2622
2623 mdev = container_of(this, struct mlx4_en_dev, nb);
2624 dev = mdev->dev;
2625
2626 /* Go into bonded mode only when the two network devices on the two
2627 * ports of the same mlx4 device are slaves of the same bonding master
2628 */
2629 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2630 ++num_eth_ports;
2631 if (!port && (mdev->pndev[i] == ndev))
2632 port = i;
2633 mdev->upper[i] = mdev->pndev[i] ?
2634 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2635 /* condition not met: network device is a slave */
2636 if (!mdev->upper[i])
2637 do_bond = false;
2638 if (num_eth_ports < 2)
2639 continue;
2640 /* condition not met: same master */
2641 if (mdev->upper[i] != mdev->upper[i-1])
2642 do_bond = false;
2643 }
2644 /* condition not met: 2 slaves */
2645 do_bond = (num_eth_ports == 2) ? do_bond : false;
2646
2647 /* handle only events that come with enough info */
2648 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
2649 return NOTIFY_DONE;
2650
2651 priv = netdev_priv(ndev);
2652 if (do_bond) {
2653 struct netdev_notifier_bonding_info *notifier_info = ptr;
2654 struct netdev_bonding_info *bonding_info =
2655 &notifier_info->bonding_info;
2656
2657 /* require bond mode 1 (active-backup), 2 (XOR) or 4 (802.3ad) */
2658 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
2659 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
2660 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
2661 do_bond = false;
2662
2663 /* require exactly 2 slaves */
2664 if (bonding_info->master.num_slaves != 2)
2665 do_bond = false;
2666
2667 /* calculate the virtual-to-physical port mapping (v2p) */
2668 if (do_bond) {
2669 if (bonding_info->master.bond_mode ==
2670 BOND_MODE_ACTIVEBACKUP) {
2671 /* in active-backup mode virtual ports are
2672 * mapped to the physical port of the active
2673 * slave */
2674 if (bonding_info->slave.state ==
2675 BOND_STATE_BACKUP) {
2676 if (port == 1) {
2677 v2p_port1 = 2;
2678 v2p_port2 = 2;
2679 } else {
2680 v2p_port1 = 1;
2681 v2p_port2 = 1;
2682 }
2683 } else { /* BOND_STATE_ACTIVE */
2684 if (port == 1) {
2685 v2p_port1 = 1;
2686 v2p_port2 = 1;
2687 } else {
2688 v2p_port1 = 2;
2689 v2p_port2 = 2;
2690 }
2691 }
2692 } else { /* Active-Active */
2693 /* in active-active mode a virtual port is
2694 * mapped to the native physical port if and only
2695 * if the physical port is up */
2696 __s8 link = bonding_info->slave.link;
2697
2698 if (port == 1)
2699 v2p_port2 = 2;
2700 else
2701 v2p_port1 = 1;
2702 if ((link == BOND_LINK_UP) ||
2703 (link == BOND_LINK_FAIL)) {
2704 if (port == 1)
2705 v2p_port1 = 1;
2706 else
2707 v2p_port2 = 2;
2708 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
2709 if (port == 1)
2710 v2p_port1 = 2;
2711 else
2712 v2p_port2 = 1;
2713 }
2714 }
2715 }
2716 }
2717
2718 mlx4_en_queue_bond_work(priv, do_bond,
2719 v2p_port1, v2p_port2);
2720
2721 return NOTIFY_DONE;
2722 }
2723
2724 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
2725 struct mlx4_en_stats_bitmap *stats_bitmap,
2726 u8 rx_ppp, u8 rx_pause,
2727 u8 tx_ppp, u8 tx_pause)
2728 {
2729 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
2730
2731 if (!mlx4_is_slave(dev) &&
2732 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
2733 mutex_lock(&stats_bitmap->mutex);
2734 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
2735
2736 if (rx_ppp)
2737 bitmap_set(stats_bitmap->bitmap, last_i,
2738 NUM_FLOW_PRIORITY_STATS_RX);
2739 last_i += NUM_FLOW_PRIORITY_STATS_RX;
2740
2741 if (rx_pause && !(rx_ppp))
2742 bitmap_set(stats_bitmap->bitmap, last_i,
2743 NUM_FLOW_STATS_RX);
2744 last_i += NUM_FLOW_STATS_RX;
2745
2746 if (tx_ppp)
2747 bitmap_set(stats_bitmap->bitmap, last_i,
2748 NUM_FLOW_PRIORITY_STATS_TX);
2749 last_i += NUM_FLOW_PRIORITY_STATS_TX;
2750
2751 if (tx_pause && !(tx_ppp))
2752 bitmap_set(stats_bitmap->bitmap, last_i,
2753 NUM_FLOW_STATS_TX);
2754 last_i += NUM_FLOW_STATS_TX;
2755
2756 mutex_unlock(&stats_bitmap->mutex);
2757 }
2758 }
2759
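/* Build the exported-statistics bitmap. The bit layout follows the
 * fixed ordering used throughout the driver: MAIN, PORT, PF, FLOW
 * (priority-RX, RX, priority-TX, TX, filled in by
 * mlx4_en_update_pfc_stats_bitmap() above) and finally PKT stats.
 * Slaves see only the basic netdev counters plus the port stats.
 */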
2760 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
2761 struct mlx4_en_stats_bitmap *stats_bitmap,
2762 u8 rx_ppp, u8 rx_pause,
2763 u8 tx_ppp, u8 tx_pause)
2764 {
2765 int last_i = 0;
2766
2767 mutex_init(&stats_bitmap->mutex);
2768 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
2769
2770 if (mlx4_is_slave(dev)) {
2771 bitmap_set(stats_bitmap->bitmap, last_i +
2772 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
2773 bitmap_set(stats_bitmap->bitmap, last_i +
2774 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
2775 bitmap_set(stats_bitmap->bitmap, last_i +
2776 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
2777 bitmap_set(stats_bitmap->bitmap, last_i +
2778 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
2779 bitmap_set(stats_bitmap->bitmap, last_i +
2780 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
2781 bitmap_set(stats_bitmap->bitmap, last_i +
2782 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
2783 } else {
2784 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
2785 }
2786 last_i += NUM_MAIN_STATS;
2787
2788 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
2789 last_i += NUM_PORT_STATS;
2790
2791 if (mlx4_is_master(dev))
2792 bitmap_set(stats_bitmap->bitmap, last_i,
2793 NUM_PF_STATS);
2794 last_i += NUM_PF_STATS;
2795
2796 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
2797 rx_ppp, rx_pause,
2798 tx_ppp, tx_pause);
2799 last_i += NUM_FLOW_STATS;
2800
2801 if (!mlx4_is_slave(dev))
2802 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
2803 }
2804
2805 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2806 struct mlx4_en_port_profile *prof)
2807 {
2808 struct net_device *dev;
2809 struct mlx4_en_priv *priv;
2810 int i;
2811 int err;
2812
2813 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
2814 MAX_TX_RINGS, MAX_RX_RINGS);
2815 if (dev == NULL)
2816 return -ENOMEM;
2817
2818 netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2819 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2820
2821 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
2822 dev->dev_port = port - 1;
2823
2824 /*
2825 * Initialize driver private data
2826 */
2827
2828 priv = netdev_priv(dev);
2829 memset(priv, 0, sizeof(struct mlx4_en_priv));
2830 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
2831 spin_lock_init(&priv->stats_lock);
2832 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2833 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2834 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2835 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2836 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2837 #ifdef CONFIG_MLX4_EN_VXLAN
2838 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
2839 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
2840 #endif
2841 #ifdef CONFIG_RFS_ACCEL
2842 INIT_LIST_HEAD(&priv->filters);
2843 spin_lock_init(&priv->filters_lock);
2844 #endif
2845
2846 priv->dev = dev;
2847 priv->mdev = mdev;
2848 priv->ddev = &mdev->pdev->dev;
2849 priv->prof = prof;
2850 priv->port = port;
2851 priv->port_up = false;
2852 priv->flags = prof->flags;
2853 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
2854 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
2855 MLX4_WQE_CTRL_SOLICITED);
2856 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2857 priv->tx_ring_num = prof->tx_ring_num;
2858 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
2859 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
2860
2861 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2862 GFP_KERNEL);
2863 if (!priv->tx_ring) {
2864 err = -ENOMEM;
2865 goto out;
2866 }
2867 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
2868 GFP_KERNEL);
2869 if (!priv->tx_cq) {
2870 err = -ENOMEM;
2871 goto out;
2872 }
2873 priv->rx_ring_num = prof->rx_ring_num;
2874 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2875 priv->cqe_size = mdev->dev->caps.cqe_size;
2876 priv->mac_index = -1;
2877 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2878 #ifdef CONFIG_MLX4_EN_DCB
2879 if (!mlx4_is_slave(priv->mdev->dev)) {
2880 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2881 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2882 } else {
2883 en_info(priv, "enabling only PFC DCB ops\n");
2884 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2885 }
2886 }
2887 #endif
2888
2889 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
2890 INIT_HLIST_HEAD(&priv->mac_hash[i]);
2891
2892 /* Query for default MAC and max MTU */
2893 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2894
2895 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
2896 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
2897 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
2898
2899 /* Set default MAC */
2900 dev->addr_len = ETH_ALEN;
2901 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
2902 if (!is_valid_ether_addr(dev->dev_addr)) {
2903 en_err(priv, "Port: %d, invalid MAC burned: %pM, quitting\n",
2904 priv->port, dev->dev_addr);
2905 err = -EINVAL;
2906 goto out;
2907 } else if (mlx4_is_slave(priv->mdev->dev) &&
2908 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
2909 /* Random MAC was assigned in mlx4_slave_cap
2910 * in mlx4_core module
2911 */
2912 dev->addr_assign_type |= NET_ADDR_RANDOM;
2913 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
2914 }
2915
2916 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
2917
2918 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2919 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2920 err = mlx4_en_alloc_resources(priv);
2921 if (err)
2922 goto out;
2923
2924 /* Initialize time stamping config */
2925 priv->hwtstamp_config.flags = 0;
2926 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
2927 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2928
2929 /* Allocate page for receive rings */
2930 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2931 MLX4_EN_PAGE_SIZE);
2932 if (err) {
2933 en_err(priv, "Failed to allocate page for rx qps\n");
2934 goto out;
2935 }
2936 priv->allocated = 1;
2937
2938 /*
2939 * Initialize netdev entry points
2940 */
2941 if (mlx4_is_master(priv->mdev->dev))
2942 dev->netdev_ops = &mlx4_netdev_ops_master;
2943 else
2944 dev->netdev_ops = &mlx4_netdev_ops;
2945 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
2946 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2947 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2948
2949 dev->ethtool_ops = &mlx4_en_ethtool_ops;
2950
2951 /*
2952 * Set driver features
2953 */
2954 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2955 if (mdev->LSO_support)
2956 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2957
2958 dev->vlan_features = dev->hw_features;
2959
2960 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
2961 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
2962 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2963 NETIF_F_HW_VLAN_CTAG_FILTER;
2964 dev->hw_features |= NETIF_F_LOOPBACK |
2965 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2966
2967 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
2968 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
2969 NETIF_F_HW_VLAN_STAG_FILTER;
2970 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
2971 }
2972
2973 if (mlx4_is_slave(mdev->dev)) {
2974 int phv;
2975
2976 err = get_phv_bit(mdev->dev, port, &phv);
2977 if (!err && phv) {
2978 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
2979 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
2980 }
2981 } else {
2982 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
2983 !(mdev->dev->caps.flags2 &
2984 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2985 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
2986 }
2987
2988 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
2989 dev->hw_features |= NETIF_F_RXFCS;
2990
2991 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
2992 dev->hw_features |= NETIF_F_RXALL;
2993
2994 if (mdev->dev->caps.steering_mode ==
2995 MLX4_STEERING_MODE_DEVICE_MANAGED &&
2996 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
2997 dev->hw_features |= NETIF_F_NTUPLE;
2998
2999 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3000 dev->priv_flags |= IFF_UNICAST_FLT;
3001
3002 /* Setting a default hash function value */
3003 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3004 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3005 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3006 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3007 } else {
3008 en_warn(priv,
3009 "No RSS hash capabilities exposed, using Toeplitz\n");
3010 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3011 }
3012
3013 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3014 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3015 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3016 NETIF_F_GSO_PARTIAL;
3017 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3018 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3019 NETIF_F_GSO_PARTIAL;
3020 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
3021 }
3022
3023 mdev->pndev[port] = dev;
3024 mdev->upper[port] = NULL;
3025
3026 netif_carrier_off(dev);
3027 mlx4_en_set_default_moderation(priv);
3028
3029 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
3030 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3031
3032 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3033
3034 /* Configure port */
3035 mlx4_en_calc_rx_buf(dev);
3036 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
3037 priv->rx_skb_size + ETH_FCS_LEN,
3038 prof->tx_pause, prof->tx_ppp,
3039 prof->rx_pause, prof->rx_ppp);
3040 if (err) {
3041 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3042 priv->port, err);
3043 goto out;
3044 }
3045
3046 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3047 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
3048 if (err) {
3049 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3050 err);
3051 goto out;
3052 }
3053 }
3054
3055 /* Init port */
3056 en_warn(priv, "Initializing port\n");
3057 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3058 if (err) {
3059 en_err(priv, "Failed initializing port\n");
3060 goto out;
3061 }
3062 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
3063
3064 /* Initialize time stamp mechanism */
3065 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
3066 mlx4_en_init_timestamp(mdev);
3067
3068 queue_delayed_work(mdev->workqueue, &priv->service_task,
3069 SERVICE_TASK_DELAY);
3070
3071 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3072 mdev->profile.prof[priv->port].rx_ppp,
3073 mdev->profile.prof[priv->port].rx_pause,
3074 mdev->profile.prof[priv->port].tx_ppp,
3075 mdev->profile.prof[priv->port].tx_pause);
3076
3077 err = register_netdev(dev);
3078 if (err) {
3079 en_err(priv, "Netdev registration failed for port %d\n", port);
3080 goto out;
3081 }
3082
3083 priv->registered = 1;
3084 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
3085 dev);
3086
3087 return 0;
3088
3089 out:
3090 mlx4_en_destroy_netdev(dev);
3091 return err;
3092 }
3093
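/* Reconfigure RX time-stamping, RX VLAN offload and/or RX-FCS. These
 * settings affect how the rings and CQs are built, so the port is
 * stopped (if up), all resources are freed and reallocated under the
 * new configuration, and the port is restarted. Callers must not hold
 * mdev->state_lock.
 */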
3094 int mlx4_en_reset_config(struct net_device *dev,
3095 struct hwtstamp_config ts_config,
3096 netdev_features_t features)
3097 {
3098 struct mlx4_en_priv *priv = netdev_priv(dev);
3099 struct mlx4_en_dev *mdev = priv->mdev;
3100 int port_up = 0;
3101 int err = 0;
3102
3103 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3104 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
3105 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3106 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
3107 return 0; /* Nothing to change */
3108
3109 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3110 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3111 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3112 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3113 return -EINVAL;
3114 }
3115
3116 mutex_lock(&mdev->state_lock);
3117 if (priv->port_up) {
3118 port_up = 1;
3119 mlx4_en_stop_port(dev, 1);
3120 }
3121
3122 mlx4_en_free_resources(priv);
3123
3124 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
3125 ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3126
3127 priv->hwtstamp_config.tx_type = ts_config.tx_type;
3128 priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
3129
3130 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3131 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3132 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3133 else
3134 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3135 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3136 /* RX time-stamping is OFF, update the RX vlan offload
3137 * to the latest wanted state
3138 */
3139 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3140 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3141 else
3142 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3143 }
3144
3145 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3146 if (features & NETIF_F_RXFCS)
3147 dev->features |= NETIF_F_RXFCS;
3148 else
3149 dev->features &= ~NETIF_F_RXFCS;
3150 }
3151
3152 /* RX VLAN offload and RX time-stamping can't coexist!
3153 * Regardless of the caller's choice, turn off RX VLAN offload
3154 * when RX time-stamping is on.
3155 */
3156 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3157 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3158 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3159 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3160 }
3161
3162 err = mlx4_en_alloc_resources(priv);
3163 if (err) {
3164 en_err(priv, "Failed reallocating port resources\n");
3165 goto out;
3166 }
3167 if (port_up) {
3168 err = mlx4_en_start_port(dev);
3169 if (err)
3170 en_err(priv, "Failed starting port\n");
3171 }
3172
3173 out:
3174 mutex_unlock(&mdev->state_lock);
3175 netdev_features_change(dev);
3176 return err;
3177 }