/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly among UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_params.dcb_cfg.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}
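
/* Illustrative sketch of the partitioning above (numbers are hypothetical,
 * not driver defaults): with up == 2 and priv->num_tx_rings_p_up == 8, the
 * loop yields
 *   TC 0 -> Tx queues 0..7
 *   TC 1 -> Tx queues 8..15
 * i.e. each user priority owns a contiguous block of num_tx_rings_p_up
 * queues starting at its accumulated offset.
 */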

static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			      struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx4_en_setup_tc(dev, tc->tc);
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
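
/* Example of the bucket selection above (flow values are hypothetical):
 * for a TCP flow 192.168.0.1:1024 -> 10.0.0.1:80, both ports are folded
 * into one word (dst_port shifted left by 2), XORed with src_ip ^ dst_ip,
 * and hash_long() then picks one of 1 << MLX4_EN_FILTER_HASH_SHIFT buckets
 * in priv->filter_hash.
 */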

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
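
/* Note on the return value above: on success the function returns
 * filter->id (a small driver-local id kept below RPS_NO_FILTER), which the
 * RFS core later hands back to rps_may_expire_flow() in
 * mlx4_en_filter_rfs_expire() to decide whether the hardware rule may be
 * torn down.
 */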

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
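
/* Worked example for the conversion above (address is hypothetical):
 * src_mac = 0x0002c9000001 produces dst_mac = 00:02:c9:00:00:01 -- the
 * least significant byte of the u64 lands in dst_mac[ETH_ALEN - 1] and
 * the two trailing pad bytes are zeroed.
 */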


static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}


static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
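
/* The two attach paths above differ only in mechanism: B0 steering encodes
 * the MAC and port into a 16-byte GID and attaches the QP with
 * mlx4_unicast_attach(), while device-managed steering builds an ETH spec
 * rule (exact dst_mac match at MLX4_DOMAIN_NIC priority) and registers it
 * through mlx4_flow_attach(), returning reg_id for a later
 * mlx4_flow_detach().
 */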

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
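
/* Summary of the two branches above: with A0 steering the MAC table entry
 * is rewritten in place via __mlx4_replace_mac(), keeping the same qpn;
 * otherwise the mac_hash entry for prev_mac is released, re-keyed under
 * new_mac's hash bucket, and its unicast (and, if present, VXLAN tunnel)
 * steering rules are re-attached for the new address.
 */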

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src.
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst,
	 * and mark them as needing to be added.
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
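
/* After update_mclist_flags() returns, every entry on dst carries one of
 * three actions: MCLIST_REM (present in dst only -> detach from HW),
 * MCLIST_NONE (present in both -> leave as is), or MCLIST_ADD (freshly
 * copied from src -> attach to HW). mlx4_en_do_multicast() consumes these
 * actions below.
 */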

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		napi_schedule(&cq->napi);
	}
}
#endif

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct rtnl_link_stats64 *
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);

	return stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters
		 */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
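
/* The moderation time computed above is a linear interpolation between
 * rx_usecs_low and rx_usecs_high over [pkt_rate_low, pkt_rate_high].
 * Worked example (all values illustrative only, not the driver defaults):
 *   pkt_rate_low = 400000, pkt_rate_high = 450000,
 *   rx_usecs_low = 0, rx_usecs_high = 128, rate = 425000
 *   moder_time = (425000 - 400000) * (128 - 0) / (450000 - 400000) + 0
 *              = 64 usec
 */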

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set carrier state and
	 * report to system log
	 */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}
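
/* cpumask_local_spread() above picks the ring_idx-th online CPU, preferring
 * CPUs on the device's NUMA node, so consecutive RX rings get spread across
 * distinct local cores before falling back to remote ones.
 */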

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
	int rr_index;

	rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
	if (rr_index >= 0) {
		tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
		tx_ring->recycle_ring = priv->rx_ring[rr_index];
		en_dbg(DRV, priv,
		       "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
		       tx_ring_idx, rr_index);
	} else {
		tx_ring->recycle_ring = NULL;
	}
}
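
/* rr_index above is only non-negative for the last xdp_ring_num TX rings,
 * so exactly those rings are paired 1:1 with RX rings for XDP_TX page
 * recycling; all other TX rings keep recycle_ring == NULL and use the
 * regular free_tx_desc path.
 */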

int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		mlx4_en_init_recycle_ring(priv, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		udp_tunnel_get_rx_info(dev);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}


void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	rtnl_lock();
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
	rtnl_unlock();
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->tx_dropped = 0;
		priv->tx_ring[i]->queue_stopped = 0;
		priv->tx_ring[i]->wake_queue = 0;
		priv->tx_ring[i]->tso_packets = 0;
		priv->tx_ring[i]->xmit_more = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
		priv->rx_ring[i]->csum_complete = 0;
	}
}
1944
1945 static int mlx4_en_open(struct net_device *dev)
1946 {
1947 struct mlx4_en_priv *priv = netdev_priv(dev);
1948 struct mlx4_en_dev *mdev = priv->mdev;
1949 int err = 0;
1950
1951 mutex_lock(&mdev->state_lock);
1952
1953 if (!mdev->device_up) {
1954 en_err(priv, "Cannot open - device down/disabled\n");
1955 err = -EBUSY;
1956 goto out;
1957 }
1958
1959 /* Reset HW statistics and SW counters */
1960 mlx4_en_clear_stats(dev);
1961
1962 err = mlx4_en_start_port(dev);
1963 if (err)
1964 en_err(priv, "Failed starting port:%d\n", priv->port);
1965
1966 out:
1967 mutex_unlock(&mdev->state_lock);
1968 return err;
1969 }
1970
1971
1972 static int mlx4_en_close(struct net_device *dev)
1973 {
1974 struct mlx4_en_priv *priv = netdev_priv(dev);
1975 struct mlx4_en_dev *mdev = priv->mdev;
1976
1977 en_dbg(IFDOWN, priv, "Close port called\n");
1978
1979 mutex_lock(&mdev->state_lock);
1980
1981 mlx4_en_stop_port(dev, 0);
1982 netif_carrier_off(dev);
1983
1984 mutex_unlock(&mdev->state_lock);
1985 return 0;
1986 }
1987
1988 static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1989 {
1990 int i;
1991
1992 #ifdef CONFIG_RFS_ACCEL
1993 priv->dev->rx_cpu_rmap = NULL;
1994 #endif
1995
1996 for (i = 0; i < priv->tx_ring_num; i++) {
1997 if (priv->tx_ring && priv->tx_ring[i])
1998 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1999 if (priv->tx_cq && priv->tx_cq[i])
2000 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2001 }
2002
2003 for (i = 0; i < priv->rx_ring_num; i++) {
2004 if (priv->rx_ring[i])
2005 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2006 priv->prof->rx_ring_size, priv->stride);
2007 if (priv->rx_cq[i])
2008 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2009 }
2010
2011 }
2012
2013 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
2014 {
2015 struct mlx4_en_port_profile *prof = priv->prof;
2016 int i;
2017 int node;
2018
2019 	/* Create TX Rings */
2020 for (i = 0; i < priv->tx_ring_num; i++) {
2021 node = cpu_to_node(i % num_online_cpus());
2022 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
2023 prof->tx_ring_size, i, TX, node))
2024 goto err;
2025
2026 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
2027 prof->tx_ring_size, TXBB_SIZE,
2028 node, i))
2029 goto err;
2030 }
2031
2032 	/* Create RX Rings */
2033 for (i = 0; i < priv->rx_ring_num; i++) {
2034 node = cpu_to_node(i % num_online_cpus());
2035 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
2036 prof->rx_ring_size, i, RX, node))
2037 goto err;
2038
2039 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
2040 prof->rx_ring_size, priv->stride,
2041 node))
2042 goto err;
2043 }
2044
2045 #ifdef CONFIG_RFS_ACCEL
2046 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
2047 #endif
2048
2049 return 0;
2050
2051 err:
2052 en_err(priv, "Failed to allocate NIC resources\n");
2053 for (i = 0; i < priv->rx_ring_num; i++) {
2054 if (priv->rx_ring[i])
2055 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2056 prof->rx_ring_size,
2057 priv->stride);
2058 if (priv->rx_cq[i])
2059 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2060 }
2061 for (i = 0; i < priv->tx_ring_num; i++) {
2062 if (priv->tx_ring[i])
2063 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
2064 if (priv->tx_cq[i])
2065 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2066 }
2067 return -ENOMEM;
2068 }
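
/* Editor's note: cpu_to_node(i % num_online_cpus()) spreads the ring and
 * CQ allocations round-robin over the nodes of the online CPUs. Worked
 * example (hypothetical topology): with 8 online CPUs, CPUs 0-3 on node 0
 * and CPUs 4-7 on node 1, rings 0-3 allocate on node 0, rings 4-7 on
 * node 1, and ring 8 wraps back to node 0.
 */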
2069
2070 static void mlx4_en_shutdown(struct net_device *dev)
2071 {
2072 rtnl_lock();
2073 netif_device_detach(dev);
2074 mlx4_en_close(dev);
2075 rtnl_unlock();
2076 }
2077
2078 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
2079 struct mlx4_en_priv *src,
2080 struct mlx4_en_port_profile *prof)
2081 {
2082 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
2083 sizeof(dst->hwtstamp_config));
2084 dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
2085 dst->tx_ring_num = prof->tx_ring_num;
2086 dst->rx_ring_num = prof->rx_ring_num;
2087 dst->flags = prof->flags;
2088 dst->mdev = src->mdev;
2089 dst->port = src->port;
2090 dst->dev = src->dev;
2091 dst->prof = prof;
2092 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2093 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2094
2095 dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2096 GFP_KERNEL);
2097 if (!dst->tx_ring)
2098 return -ENOMEM;
2099
2100 dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
2101 GFP_KERNEL);
2102 if (!dst->tx_cq) {
2103 kfree(dst->tx_ring);
2104 return -ENOMEM;
2105 }
2106 return 0;
2107 }
2108
2109 static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
2110 struct mlx4_en_priv *src)
2111 {
2112 memcpy(dst->rx_ring, src->rx_ring,
2113 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
2114 memcpy(dst->rx_cq, src->rx_cq,
2115 sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
2116 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
2117 sizeof(dst->hwtstamp_config));
2118 dst->tx_ring_num = src->tx_ring_num;
2119 dst->rx_ring_num = src->rx_ring_num;
2120 dst->tx_ring = src->tx_ring;
2121 dst->tx_cq = src->tx_cq;
2122 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
2123 }
2124
2125 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2126 struct mlx4_en_priv *tmp,
2127 struct mlx4_en_port_profile *prof)
2128 {
2129 	if (mlx4_en_copy_priv(tmp, priv, prof))
2130 		return -ENOMEM;
2131 if (mlx4_en_alloc_resources(tmp)) {
2132 en_warn(priv,
2133 "%s: Resource allocation failed, using previous configuration\n",
2134 __func__);
2135 kfree(tmp->tx_ring);
2136 kfree(tmp->tx_cq);
2137 return -ENOMEM;
2138 }
2139 return 0;
2140 }
2141
2142 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
2143 struct mlx4_en_priv *tmp)
2144 {
2145 mlx4_en_free_resources(priv);
2146 mlx4_en_update_priv(priv, tmp);
2147 }
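
/* A minimal usage sketch of the two-phase reconfiguration pattern above,
 * modelled on mlx4_en_reset_config() at the end of this file; dev, priv,
 * mdev and new_prof are assumed to be set up by the caller as they are
 * there:
 */
#if 0	/* illustrative only */
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	/* allocate the new rings on the side; on failure the old ones
	 * stay untouched */
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
	if (!err) {
		if (priv->port_up) {
			port_up = 1;
			mlx4_en_stop_port(dev, 1);
		}
		/* free the old resources and adopt the new ones */
		mlx4_en_safe_replace_resources(priv, tmp);
		if (port_up && mlx4_en_start_port(dev))
			en_err(priv, "Failed starting port\n");
	}
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
#endif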
2148
2149 void mlx4_en_destroy_netdev(struct net_device *dev)
2150 {
2151 struct mlx4_en_priv *priv = netdev_priv(dev);
2152 struct mlx4_en_dev *mdev = priv->mdev;
2153 bool shutdown = mdev->dev->persist->interface_state &
2154 MLX4_INTERFACE_STATE_SHUTDOWN;
2155
2156 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2157
2158 /* Unregister device - this will close the port if it was up */
2159 if (priv->registered) {
2160 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2161 priv->port));
2162 if (shutdown)
2163 mlx4_en_shutdown(dev);
2164 else
2165 unregister_netdev(dev);
2166 }
2167
2168 if (priv->allocated)
2169 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2170
2171 cancel_delayed_work(&priv->stats_task);
2172 cancel_delayed_work(&priv->service_task);
2173 /* flush any pending task for this netdev */
2174 flush_workqueue(mdev->workqueue);
2175
2176 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2177 mlx4_en_remove_timestamp(mdev);
2178
2179 /* Detach the netdev so tasks would not attempt to access it */
2180 mutex_lock(&mdev->state_lock);
2181 mdev->pndev[priv->port] = NULL;
2182 mdev->upper[priv->port] = NULL;
2183 mutex_unlock(&mdev->state_lock);
2184
2185 #ifdef CONFIG_RFS_ACCEL
2186 mlx4_en_cleanup_filters(priv);
2187 #endif
2188
2189 mlx4_en_free_resources(priv);
2190
2191 kfree(priv->tx_ring);
2192 kfree(priv->tx_cq);
2193
2194 if (!shutdown)
2195 free_netdev(dev);
2196 }
2197
2198 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2199 {
2200 struct mlx4_en_priv *priv = netdev_priv(dev);
2201 struct mlx4_en_dev *mdev = priv->mdev;
2202 int err = 0;
2203
2204 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
2205 dev->mtu, new_mtu);
2206
2207 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
2208 en_err(priv, "Bad MTU size:%d.\n", new_mtu);
2209 return -EPERM;
2210 }
2211 if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
2212 en_err(priv, "MTU size:%d requires frags but XDP running\n",
2213 new_mtu);
2214 return -EOPNOTSUPP;
2215 }
2216 dev->mtu = new_mtu;
2217
2218 if (netif_running(dev)) {
2219 mutex_lock(&mdev->state_lock);
2220 if (!mdev->device_up) {
2221 /* NIC is probably restarting - let watchdog task reset
2222 * the port */
2223 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
2224 } else {
2225 mlx4_en_stop_port(dev, 1);
2226 err = mlx4_en_start_port(dev);
2227 if (err) {
2228 en_err(priv, "Failed restarting port:%d\n",
2229 priv->port);
2230 queue_work(mdev->workqueue, &priv->watchdog_task);
2231 }
2232 }
2233 mutex_unlock(&mdev->state_lock);
2234 }
2235 return 0;
2236 }
2237
2238 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2239 {
2240 struct mlx4_en_priv *priv = netdev_priv(dev);
2241 struct mlx4_en_dev *mdev = priv->mdev;
2242 struct hwtstamp_config config;
2243
2244 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2245 return -EFAULT;
2246
2247 /* reserved for future extensions */
2248 if (config.flags)
2249 return -EINVAL;
2250
2251 /* device doesn't support time stamping */
2252 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2253 return -EINVAL;
2254
2255 /* TX HW timestamp */
2256 switch (config.tx_type) {
2257 case HWTSTAMP_TX_OFF:
2258 case HWTSTAMP_TX_ON:
2259 break;
2260 default:
2261 return -ERANGE;
2262 }
2263
2264 /* RX HW timestamp */
2265 switch (config.rx_filter) {
2266 case HWTSTAMP_FILTER_NONE:
2267 break;
2268 case HWTSTAMP_FILTER_ALL:
2269 case HWTSTAMP_FILTER_SOME:
2270 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2271 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2272 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2273 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2274 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2275 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2276 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2277 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2278 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2279 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2280 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2281 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2282 config.rx_filter = HWTSTAMP_FILTER_ALL;
2283 break;
2284 default:
2285 return -ERANGE;
2286 }
2287
2288 if (mlx4_en_reset_config(dev, config, dev->features)) {
2289 config.tx_type = HWTSTAMP_TX_OFF;
2290 config.rx_filter = HWTSTAMP_FILTER_NONE;
2291 }
2292
2293 return copy_to_user(ifr->ifr_data, &config,
2294 sizeof(config)) ? -EFAULT : 0;
2295 }
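
/* Hedged userspace counterpart (not part of the driver): roughly how a
 * program reaches the SIOCSHWTSTAMP path above. Assumes an AF_INET
 * socket "fd" and the interface name "eth0", with <sys/ioctl.h>,
 * <net/if.h>, <linux/sockios.h> and <linux/net_tstamp.h> included:
 */
#if 0	/* illustrative only */
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	/* on success, cfg reflects what the driver actually applied; e.g.
	 * this driver widens specific PTP filters to HWTSTAMP_FILTER_ALL */
#endif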
2296
2297 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2298 {
2299 struct mlx4_en_priv *priv = netdev_priv(dev);
2300
2301 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2302 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2303 }
2304
2305 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2306 {
2307 switch (cmd) {
2308 case SIOCSHWTSTAMP:
2309 return mlx4_en_hwtstamp_set(dev, ifr);
2310 case SIOCGHWTSTAMP:
2311 return mlx4_en_hwtstamp_get(dev, ifr);
2312 default:
2313 return -EOPNOTSUPP;
2314 }
2315 }
2316
2317 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2318 netdev_features_t features)
2319 {
2320 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2321 struct mlx4_en_dev *mdev = en_priv->mdev;
2322
2323 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
2324 * enable/disable make sure S-TAG flag is always in same state as
2325 * C-TAG.
2326 */
2327 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2328 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2329 features |= NETIF_F_HW_VLAN_STAG_RX;
2330 else
2331 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2332
2333 return features;
2334 }
2335
2336 static int mlx4_en_set_features(struct net_device *netdev,
2337 netdev_features_t features)
2338 {
2339 struct mlx4_en_priv *priv = netdev_priv(netdev);
2340 bool reset = false;
2341 int ret = 0;
2342
2343 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2344 en_info(priv, "Turn %s RX-FCS\n",
2345 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2346 reset = true;
2347 }
2348
2349 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2350 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2351
2352 en_info(priv, "Turn %s RX-ALL\n",
2353 ignore_fcs_value ? "ON" : "OFF");
2354 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2355 priv->port, ignore_fcs_value);
2356 if (ret)
2357 return ret;
2358 }
2359
2360 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2361 en_info(priv, "Turn %s RX vlan strip offload\n",
2362 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2363 reset = true;
2364 }
2365
2366 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2367 en_info(priv, "Turn %s TX vlan strip offload\n",
2368 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2369
2370 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2371 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2372 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2373
2374 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2375 en_info(priv, "Turn %s loopback\n",
2376 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2377 mlx4_en_update_loopback_state(netdev, features);
2378 }
2379
2380 if (reset) {
2381 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2382 features);
2383 if (ret)
2384 return ret;
2385 }
2386
2387 return 0;
2388 }
2389
2390 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2391 {
2392 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2393 struct mlx4_en_dev *mdev = en_priv->mdev;
2394 u64 mac_u64 = mlx4_mac_to_u64(mac);
2395
2396 if (is_multicast_ether_addr(mac))
2397 return -EINVAL;
2398
2399 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
2400 }
2401
2402 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2403 {
2404 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2405 struct mlx4_en_dev *mdev = en_priv->mdev;
2406
2407 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
2408 }
2409
2410 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2411 int max_tx_rate)
2412 {
2413 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2414 struct mlx4_en_dev *mdev = en_priv->mdev;
2415
2416 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2417 max_tx_rate);
2418 }
2419
2420 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2421 {
2422 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2423 struct mlx4_en_dev *mdev = en_priv->mdev;
2424
2425 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2426 }
2427
2428 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2429 {
2430 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2431 struct mlx4_en_dev *mdev = en_priv->mdev;
2432
2433 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2434 }
2435
2436 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2437 {
2438 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2439 struct mlx4_en_dev *mdev = en_priv->mdev;
2440
2441 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2442 }
2443
2444 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2445 struct ifla_vf_stats *vf_stats)
2446 {
2447 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2448 struct mlx4_en_dev *mdev = en_priv->mdev;
2449
2450 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2451 }
2452
2453 #define PORT_ID_BYTE_LEN 8
2454 static int mlx4_en_get_phys_port_id(struct net_device *dev,
2455 struct netdev_phys_item_id *ppid)
2456 {
2457 struct mlx4_en_priv *priv = netdev_priv(dev);
2458 struct mlx4_dev *mdev = priv->mdev->dev;
2459 int i;
2460 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2461
2462 if (!phys_port_id)
2463 return -EOPNOTSUPP;
2464
2465 ppid->id_len = sizeof(phys_port_id);
2466 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2467 ppid->id[i] = phys_port_id & 0xff;
2468 phys_port_id >>= 8;
2469 }
2470 return 0;
2471 }
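
/* Worked example for the byte-order loop above (value hypothetical): for
 * phys_port_id == 0x0002c9030012cd80 the id is emitted most-significant
 * byte first, so ppid->id[] = { 00 02 c9 03 00 12 cd 80 } with
 * id_len == 8.
 */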
2472
2473 static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2474 {
2475 int ret;
2476 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2477 vxlan_add_task);
2478
2479 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2480 if (ret)
2481 goto out;
2482
2483 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2484 VXLAN_STEER_BY_OUTER_MAC, 1);
2485 out:
2486 if (ret) {
2487 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2488 return;
2489 }
2490
2491 /* set offloads */
2492 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2493 NETIF_F_RXCSUM |
2494 NETIF_F_TSO | NETIF_F_TSO6 |
2495 NETIF_F_GSO_UDP_TUNNEL |
2496 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2497 NETIF_F_GSO_PARTIAL;
2498 }
2499
2500 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2501 {
2502 int ret;
2503 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2504 vxlan_del_task);
2505 /* unset offloads */
2506 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2507 NETIF_F_RXCSUM |
2508 NETIF_F_TSO | NETIF_F_TSO6 |
2509 NETIF_F_GSO_UDP_TUNNEL |
2510 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2511 NETIF_F_GSO_PARTIAL);
2512
2513 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2514 VXLAN_STEER_BY_OUTER_MAC, 0);
2515 if (ret)
2516 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2517
2518 priv->vxlan_port = 0;
2519 }
2520
2521 static void mlx4_en_add_vxlan_port(struct net_device *dev,
2522 struct udp_tunnel_info *ti)
2523 {
2524 struct mlx4_en_priv *priv = netdev_priv(dev);
2525 __be16 port = ti->port;
2526 __be16 current_port;
2527
2528 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2529 return;
2530
2531 if (ti->sa_family != AF_INET)
2532 return;
2533
2534 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2535 return;
2536
2537 current_port = priv->vxlan_port;
2538 if (current_port && current_port != port) {
2539 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2540 ntohs(current_port), ntohs(port));
2541 return;
2542 }
2543
2544 priv->vxlan_port = port;
2545 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2546 }
2547
2548 static void mlx4_en_del_vxlan_port(struct net_device *dev,
2549 struct udp_tunnel_info *ti)
2550 {
2551 struct mlx4_en_priv *priv = netdev_priv(dev);
2552 __be16 port = ti->port;
2553 __be16 current_port;
2554
2555 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2556 return;
2557
2558 if (ti->sa_family != AF_INET)
2559 return;
2560
2561 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2562 return;
2563
2564 current_port = priv->vxlan_port;
2565 if (current_port != port) {
2566 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2567 return;
2568 }
2569
2570 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2571 }
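
/* Editor's note: both tunnel callbacks only record the port and defer the
 * actual firmware configuration (mlx4_config_vxlan_port /
 * mlx4_SET_PORT_VXLAN) to the workqueue tasks above, presumably because
 * .ndo_udp_tunnel_add/del may be invoked in contexts where the command
 * interface cannot be used directly. Only a single VXLAN UDP port is
 * supported at a time, hence the current_port checks.
 */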
2572
2573 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2574 struct net_device *dev,
2575 netdev_features_t features)
2576 {
2577 features = vlan_features_check(skb, features);
2578 features = vxlan_features_check(skb, features);
2579
2580 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
2581 * support inner IPv6 checksums and segmentation so we need to
2582 * strip that feature if this is an IPv6 encapsulated frame.
2583 */
2584 if (skb->encapsulation &&
2585 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2586 struct mlx4_en_priv *priv = netdev_priv(dev);
2587
2588 if (!priv->vxlan_port ||
2589 (ip_hdr(skb)->version != 4) ||
2590 (udp_hdr(skb)->dest != priv->vxlan_port))
2591 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2592 }
2593
2594 return features;
2595 }
2596
2597 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
2598 {
2599 struct mlx4_en_priv *priv = netdev_priv(dev);
2600 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
2601 struct mlx4_update_qp_params params;
2602 int err;
2603
2604 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2605 return -EOPNOTSUPP;
2606
2607 	/* rate is provided to us in Mb/s; if it does not fit into 12 bits, use Gb/s */
2608 if (maxrate >> 12) {
2609 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2610 params.rate_val = maxrate / 1000;
2611 } else if (maxrate) {
2612 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2613 params.rate_val = maxrate;
2614 } else { /* zero serves to revoke the QP rate-limitation */
2615 params.rate_unit = 0;
2616 params.rate_val = 0;
2617 }
2618
2619 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2620 &params);
2621 return err;
2622 }
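
/* Worked example for the rate-unit selection above: maxrate arrives in
 * Mb/s. 40000 Mb/s does not fit in 12 bits (max 4095), so it is
 * programmed as 40 in Gb/s units; 2500 Mb/s fits and is programmed
 * unchanged in Mb/s units; 0 revokes the limit entirely.
 */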
2623
2624 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2625 {
2626 struct mlx4_en_priv *priv = netdev_priv(dev);
2627 struct mlx4_en_dev *mdev = priv->mdev;
2628 struct bpf_prog *old_prog;
2629 int xdp_ring_num;
2630 int port_up = 0;
2631 int err;
2632 int i;
2633
2634 xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
2635
2636 /* No need to reconfigure buffers when simply swapping the
2637 * program for a new one.
2638 */
2639 if (priv->xdp_ring_num == xdp_ring_num) {
2640 if (prog) {
2641 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2642 if (IS_ERR(prog))
2643 return PTR_ERR(prog);
2644 }
2645 for (i = 0; i < priv->rx_ring_num; i++) {
2646 /* This xchg is paired with READ_ONCE in the fastpath */
2647 old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
2648 if (old_prog)
2649 bpf_prog_put(old_prog);
2650 }
2651 return 0;
2652 }
2653
2654 if (priv->num_frags > 1) {
2655 en_err(priv, "Cannot set XDP if MTU requires multiple frags\n");
2656 return -EOPNOTSUPP;
2657 }
2658
2659 if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
2660 en_err(priv,
2661 "Minimum %d tx channels required to run XDP\n",
2662 (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
2663 return -EINVAL;
2664 }
2665
2666 if (prog) {
2667 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2668 if (IS_ERR(prog))
2669 return PTR_ERR(prog);
2670 }
2671
2672 mutex_lock(&mdev->state_lock);
2673 if (priv->port_up) {
2674 port_up = 1;
2675 mlx4_en_stop_port(dev, 1);
2676 }
2677
2678 priv->xdp_ring_num = xdp_ring_num;
2679 netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
2680 priv->xdp_ring_num);
2681
2682 for (i = 0; i < priv->rx_ring_num; i++) {
2683 old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
2684 if (old_prog)
2685 bpf_prog_put(old_prog);
2686 }
2687
2688 if (port_up) {
2689 err = mlx4_en_start_port(dev);
2690 if (err) {
2691 en_err(priv, "Failed starting port %d for XDP change\n",
2692 priv->port);
2693 queue_work(mdev->workqueue, &priv->watchdog_task);
2694 }
2695 }
2696
2697 mutex_unlock(&mdev->state_lock);
2698 return 0;
2699 }
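
/* Editor's note on the XDP refcounting above: bpf_prog_add() takes
 * rx_ring_num - 1 extra references, so together with the reference the
 * caller already holds each of the rx_ring_num RX rings ends up owning
 * exactly one. The xchg() pairs with the READ_ONCE() on the RX fast
 * path, and every displaced old_prog drops its reference through
 * bpf_prog_put().
 */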
2700
2701 static bool mlx4_xdp_attached(struct net_device *dev)
2702 {
2703 struct mlx4_en_priv *priv = netdev_priv(dev);
2704
2705 return !!priv->xdp_ring_num;
2706 }
2707
2708 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
2709 {
2710 switch (xdp->command) {
2711 case XDP_SETUP_PROG:
2712 return mlx4_xdp_set(dev, xdp->prog);
2713 case XDP_QUERY_PROG:
2714 xdp->prog_attached = mlx4_xdp_attached(dev);
2715 return 0;
2716 default:
2717 return -EINVAL;
2718 }
2719 }
2720
2721 static const struct net_device_ops mlx4_netdev_ops = {
2722 .ndo_open = mlx4_en_open,
2723 .ndo_stop = mlx4_en_close,
2724 .ndo_start_xmit = mlx4_en_xmit,
2725 .ndo_select_queue = mlx4_en_select_queue,
2726 .ndo_get_stats64 = mlx4_en_get_stats64,
2727 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2728 .ndo_set_mac_address = mlx4_en_set_mac,
2729 .ndo_validate_addr = eth_validate_addr,
2730 .ndo_change_mtu = mlx4_en_change_mtu,
2731 .ndo_do_ioctl = mlx4_en_ioctl,
2732 .ndo_tx_timeout = mlx4_en_tx_timeout,
2733 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2734 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2735 #ifdef CONFIG_NET_POLL_CONTROLLER
2736 .ndo_poll_controller = mlx4_en_netpoll,
2737 #endif
2738 .ndo_set_features = mlx4_en_set_features,
2739 .ndo_fix_features = mlx4_en_fix_features,
2740 .ndo_setup_tc = __mlx4_en_setup_tc,
2741 #ifdef CONFIG_RFS_ACCEL
2742 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2743 #endif
2744 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2745 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2746 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2747 .ndo_features_check = mlx4_en_features_check,
2748 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2749 .ndo_xdp = mlx4_xdp,
2750 };
2751
2752 static const struct net_device_ops mlx4_netdev_ops_master = {
2753 .ndo_open = mlx4_en_open,
2754 .ndo_stop = mlx4_en_close,
2755 .ndo_start_xmit = mlx4_en_xmit,
2756 .ndo_select_queue = mlx4_en_select_queue,
2757 .ndo_get_stats64 = mlx4_en_get_stats64,
2758 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2759 .ndo_set_mac_address = mlx4_en_set_mac,
2760 .ndo_validate_addr = eth_validate_addr,
2761 .ndo_change_mtu = mlx4_en_change_mtu,
2762 .ndo_tx_timeout = mlx4_en_tx_timeout,
2763 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2764 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2765 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2766 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2767 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
2768 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2769 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2770 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2771 .ndo_get_vf_config = mlx4_en_get_vf_config,
2772 #ifdef CONFIG_NET_POLL_CONTROLLER
2773 .ndo_poll_controller = mlx4_en_netpoll,
2774 #endif
2775 .ndo_set_features = mlx4_en_set_features,
2776 .ndo_fix_features = mlx4_en_fix_features,
2777 .ndo_setup_tc = __mlx4_en_setup_tc,
2778 #ifdef CONFIG_RFS_ACCEL
2779 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2780 #endif
2781 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2782 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2783 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
2784 .ndo_features_check = mlx4_en_features_check,
2785 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
2786 .ndo_xdp = mlx4_xdp,
2787 };
2788
2789 struct mlx4_en_bond {
2790 struct work_struct work;
2791 struct mlx4_en_priv *priv;
2792 int is_bonded;
2793 struct mlx4_port_map port_map;
2794 };
2795
2796 static void mlx4_en_bond_work(struct work_struct *work)
2797 {
2798 struct mlx4_en_bond *bond = container_of(work,
2799 struct mlx4_en_bond,
2800 work);
2801 int err = 0;
2802 struct mlx4_dev *dev = bond->priv->mdev->dev;
2803
2804 if (bond->is_bonded) {
2805 if (!mlx4_is_bonded(dev)) {
2806 err = mlx4_bond(dev);
2807 if (err)
2808 				en_err(bond->priv, "Failed to bond device\n");
2809 }
2810 if (!err) {
2811 err = mlx4_port_map_set(dev, &bond->port_map);
2812 if (err)
2813 				en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2814 bond->port_map.port1,
2815 bond->port_map.port2,
2816 err);
2817 }
2818 } else if (mlx4_is_bonded(dev)) {
2819 err = mlx4_unbond(dev);
2820 if (err)
2821 			en_err(bond->priv, "Failed to unbond device\n");
2822 }
2823 dev_put(bond->priv->dev);
2824 kfree(bond);
2825 }
2826
2827 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2828 u8 v2p_p1, u8 v2p_p2)
2829 {
2830 struct mlx4_en_bond *bond = NULL;
2831
2832 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2833 if (!bond)
2834 return -ENOMEM;
2835
2836 INIT_WORK(&bond->work, mlx4_en_bond_work);
2837 bond->priv = priv;
2838 bond->is_bonded = is_bonded;
2839 bond->port_map.port1 = v2p_p1;
2840 bond->port_map.port2 = v2p_p2;
2841 dev_hold(priv->dev);
2842 queue_work(priv->mdev->workqueue, &bond->work);
2843 return 0;
2844 }
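
/* Editor's note: GFP_ATOMIC presumably keeps this allocation safe even
 * if the notifier fires in a context that must not sleep; the dev_hold()
 * here is balanced by the dev_put() at the end of mlx4_en_bond_work(),
 * so the netdev cannot go away while the work is pending.
 */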
2845
2846 int mlx4_en_netdev_event(struct notifier_block *this,
2847 unsigned long event, void *ptr)
2848 {
2849 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2850 u8 port = 0;
2851 struct mlx4_en_dev *mdev;
2852 struct mlx4_dev *dev;
2853 int i, num_eth_ports = 0;
2854 bool do_bond = true;
2855 struct mlx4_en_priv *priv;
2856 u8 v2p_port1 = 0;
2857 u8 v2p_port2 = 0;
2858
2859 if (!net_eq(dev_net(ndev), &init_net))
2860 return NOTIFY_DONE;
2861
2862 mdev = container_of(this, struct mlx4_en_dev, nb);
2863 dev = mdev->dev;
2864
2865 	/* Go into bonded mode only when the two network devices set on the
2866 	 * two ports of the same mlx4 device are slaves of the same bonding master
2867 	 */
2868 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2869 ++num_eth_ports;
2870 if (!port && (mdev->pndev[i] == ndev))
2871 port = i;
2872 mdev->upper[i] = mdev->pndev[i] ?
2873 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2874 /* condition not met: network device is a slave */
2875 if (!mdev->upper[i])
2876 do_bond = false;
2877 if (num_eth_ports < 2)
2878 continue;
2879 /* condition not met: same master */
2880 if (mdev->upper[i] != mdev->upper[i-1])
2881 do_bond = false;
2882 }
2883 	/* condition not met: 2 slaves */
2884 do_bond = (num_eth_ports == 2) ? do_bond : false;
2885
2886 /* handle only events that come with enough info */
2887 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
2888 return NOTIFY_DONE;
2889
2890 priv = netdev_priv(ndev);
2891 if (do_bond) {
2892 struct netdev_notifier_bonding_info *notifier_info = ptr;
2893 struct netdev_bonding_info *bonding_info =
2894 &notifier_info->bonding_info;
2895
2896 		/* require bond mode 1 (active-backup), 2 (XOR) or 4 (802.3ad) */
2897 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
2898 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
2899 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
2900 do_bond = false;
2901
2902 /* require exactly 2 slaves */
2903 if (bonding_info->master.num_slaves != 2)
2904 do_bond = false;
2905
2906 /* calc v2p */
2907 if (do_bond) {
2908 if (bonding_info->master.bond_mode ==
2909 BOND_MODE_ACTIVEBACKUP) {
2910 /* in active-backup mode virtual ports are
2911 * mapped to the physical port of the active
2912 * slave */
2913 if (bonding_info->slave.state ==
2914 BOND_STATE_BACKUP) {
2915 if (port == 1) {
2916 v2p_port1 = 2;
2917 v2p_port2 = 2;
2918 } else {
2919 v2p_port1 = 1;
2920 v2p_port2 = 1;
2921 }
2922 } else { /* BOND_STATE_ACTIVE */
2923 if (port == 1) {
2924 v2p_port1 = 1;
2925 v2p_port2 = 1;
2926 } else {
2927 v2p_port1 = 2;
2928 v2p_port2 = 2;
2929 }
2930 }
2931 } else { /* Active-Active */
2932 /* in active-active mode a virtual port is
2933 * mapped to the native physical port if and only
2934 * if the physical port is up */
2935 __s8 link = bonding_info->slave.link;
2936
2937 if (port == 1)
2938 v2p_port2 = 2;
2939 else
2940 v2p_port1 = 1;
2941 if ((link == BOND_LINK_UP) ||
2942 (link == BOND_LINK_FAIL)) {
2943 if (port == 1)
2944 v2p_port1 = 1;
2945 else
2946 v2p_port2 = 2;
2947 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
2948 if (port == 1)
2949 v2p_port1 = 2;
2950 else
2951 v2p_port2 = 1;
2952 }
2953 }
2954 }
2955 }
2956
2957 mlx4_en_queue_bond_work(priv, do_bond,
2958 v2p_port1, v2p_port2);
2959
2960 return NOTIFY_DONE;
2961 }
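
/* Summary of the v2p (virtual-to-physical) port mapping computed above,
 * where "port" is the port the event arrived on:
 *
 *	active-backup, slave in BACKUP state: both virtual ports map to
 *	    the other physical port
 *	active-backup, slave ACTIVE:          both map to the event port
 *	XOR/802.3ad, link up or failing:      each port stays native
 *	XOR/802.3ad, link down or back:       the event port is remapped
 *	    to the other physical port
 */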
2962
2963 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
2964 struct mlx4_en_stats_bitmap *stats_bitmap,
2965 u8 rx_ppp, u8 rx_pause,
2966 u8 tx_ppp, u8 tx_pause)
2967 {
2968 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
2969
2970 if (!mlx4_is_slave(dev) &&
2971 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
2972 mutex_lock(&stats_bitmap->mutex);
2973 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
2974
2975 if (rx_ppp)
2976 bitmap_set(stats_bitmap->bitmap, last_i,
2977 NUM_FLOW_PRIORITY_STATS_RX);
2978 last_i += NUM_FLOW_PRIORITY_STATS_RX;
2979
2980 if (rx_pause && !(rx_ppp))
2981 bitmap_set(stats_bitmap->bitmap, last_i,
2982 NUM_FLOW_STATS_RX);
2983 last_i += NUM_FLOW_STATS_RX;
2984
2985 if (tx_ppp)
2986 bitmap_set(stats_bitmap->bitmap, last_i,
2987 NUM_FLOW_PRIORITY_STATS_TX);
2988 last_i += NUM_FLOW_PRIORITY_STATS_TX;
2989
2990 if (tx_pause && !(tx_ppp))
2991 bitmap_set(stats_bitmap->bitmap, last_i,
2992 NUM_FLOW_STATS_TX);
2993 last_i += NUM_FLOW_STATS_TX;
2994
2995 mutex_unlock(&stats_bitmap->mutex);
2996 }
2997 }
2998
2999 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
3000 struct mlx4_en_stats_bitmap *stats_bitmap,
3001 u8 rx_ppp, u8 rx_pause,
3002 u8 tx_ppp, u8 tx_pause)
3003 {
3004 int last_i = 0;
3005
3006 mutex_init(&stats_bitmap->mutex);
3007 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
3008
3009 if (mlx4_is_slave(dev)) {
3010 bitmap_set(stats_bitmap->bitmap, last_i +
3011 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
3012 bitmap_set(stats_bitmap->bitmap, last_i +
3013 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
3014 bitmap_set(stats_bitmap->bitmap, last_i +
3015 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
3016 bitmap_set(stats_bitmap->bitmap, last_i +
3017 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
3018 bitmap_set(stats_bitmap->bitmap, last_i +
3019 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
3020 bitmap_set(stats_bitmap->bitmap, last_i +
3021 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
3022 } else {
3023 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
3024 }
3025 last_i += NUM_MAIN_STATS;
3026
3027 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
3028 last_i += NUM_PORT_STATS;
3029
3030 if (mlx4_is_master(dev))
3031 bitmap_set(stats_bitmap->bitmap, last_i,
3032 NUM_PF_STATS);
3033 last_i += NUM_PF_STATS;
3034
3035 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
3036 rx_ppp, rx_pause,
3037 tx_ppp, tx_pause);
3038 last_i += NUM_FLOW_STATS;
3039
3040 if (!mlx4_is_slave(dev))
3041 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
3042 }
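
/* Resulting bitmap layout, in the order built above:
 *
 *	[ MAIN | PORT | PF | FLOW (pause/PFC) | PKT ]
 *
 * Slaves expose only the basic netdev counters from the MAIN block and
 * never the PF or PKT blocks; the FLOW block is filled in by
 * mlx4_en_update_pfc_stats_bitmap() according to the pause/PFC settings.
 */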
3043
3044 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3045 struct mlx4_en_port_profile *prof)
3046 {
3047 struct net_device *dev;
3048 struct mlx4_en_priv *priv;
3049 int i;
3050 int err;
3051 #ifdef CONFIG_MLX4_EN_DCB
3052 struct tc_configuration *tc;
3053 #endif
3054
3055 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
3056 MAX_TX_RINGS, MAX_RX_RINGS);
3057 	if (!dev)
3058 return -ENOMEM;
3059
3060 netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
3061 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
3062
3063 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
3064 dev->dev_port = port - 1;
3065
3066 /*
3067 * Initialize driver private data
3068 */
3069
3070 priv = netdev_priv(dev);
3071 memset(priv, 0, sizeof(struct mlx4_en_priv));
3072 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
3073 spin_lock_init(&priv->stats_lock);
3074 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3075 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
3076 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3077 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3078 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
3079 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
3080 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
3081 #ifdef CONFIG_RFS_ACCEL
3082 INIT_LIST_HEAD(&priv->filters);
3083 spin_lock_init(&priv->filters_lock);
3084 #endif
3085
3086 priv->dev = dev;
3087 priv->mdev = mdev;
3088 priv->ddev = &mdev->pdev->dev;
3089 priv->prof = prof;
3090 priv->port = port;
3091 priv->port_up = false;
3092 priv->flags = prof->flags;
3093 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
3094 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
3095 MLX4_WQE_CTRL_SOLICITED);
3096 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
3097 priv->tx_ring_num = prof->tx_ring_num;
3098 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
3099 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
3100
3101 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
3102 GFP_KERNEL);
3103 if (!priv->tx_ring) {
3104 err = -ENOMEM;
3105 goto out;
3106 }
3107 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
3108 GFP_KERNEL);
3109 if (!priv->tx_cq) {
3110 err = -ENOMEM;
3111 goto out;
3112 }
3113 priv->rx_ring_num = prof->rx_ring_num;
3114 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
3115 priv->cqe_size = mdev->dev->caps.cqe_size;
3116 priv->mac_index = -1;
3117 priv->msg_enable = MLX4_EN_MSG_LEVEL;
3118 #ifdef CONFIG_MLX4_EN_DCB
3119 if (!mlx4_is_slave(priv->mdev->dev)) {
3120 priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
3121 DCB_CAP_DCBX_HOST |
3122 DCB_CAP_DCBX_VER_IEEE;
3123 priv->flags |= MLX4_EN_DCB_ENABLED;
3124 priv->cee_params.dcb_cfg.pfc_state = false;
3125
3126 for (i = 0; i < MLX4_EN_NUM_UP; i++) {
3127 tc = &priv->cee_params.dcb_cfg.tc_config[i];
3128 tc->dcb_pfc = pfc_disabled;
3129 }
3130
3131 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
3132 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
3133 } else {
3134 en_info(priv, "enabling only PFC DCB ops\n");
3135 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
3136 }
3137 }
3138 #endif
3139
3140 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
3141 INIT_HLIST_HEAD(&priv->mac_hash[i]);
3142
3143 /* Query for default mac and max mtu */
3144 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
3145
3146 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
3147 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
3148 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
3149
3150 /* Set default MAC */
3151 dev->addr_len = ETH_ALEN;
3152 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3153 if (!is_valid_ether_addr(dev->dev_addr)) {
3154 		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
3155 priv->port, dev->dev_addr);
3156 err = -EINVAL;
3157 goto out;
3158 } else if (mlx4_is_slave(priv->mdev->dev) &&
3159 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
3160 /* Random MAC was assigned in mlx4_slave_cap
3161 * in mlx4_core module
3162 */
3163 dev->addr_assign_type |= NET_ADDR_RANDOM;
3164 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
3165 }
3166
3167 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
3168
3169 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
3170 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
3171 err = mlx4_en_alloc_resources(priv);
3172 if (err)
3173 goto out;
3174
3175 /* Initialize time stamping config */
3176 priv->hwtstamp_config.flags = 0;
3177 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3178 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3179
3180 /* Allocate page for receive rings */
3181 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
3182 MLX4_EN_PAGE_SIZE);
3183 if (err) {
3184 en_err(priv, "Failed to allocate page for rx qps\n");
3185 goto out;
3186 }
3187 priv->allocated = 1;
3188
3189 /*
3190 * Initialize netdev entry points
3191 */
3192 if (mlx4_is_master(priv->mdev->dev))
3193 dev->netdev_ops = &mlx4_netdev_ops_master;
3194 else
3195 dev->netdev_ops = &mlx4_netdev_ops;
3196 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
3197 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
3198 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
3199
3200 dev->ethtool_ops = &mlx4_en_ethtool_ops;
3201
3202 /*
3203 * Set driver features
3204 */
3205 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3206 if (mdev->LSO_support)
3207 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3208
3209 dev->vlan_features = dev->hw_features;
3210
3211 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
3212 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
3213 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3214 NETIF_F_HW_VLAN_CTAG_FILTER;
3215 dev->hw_features |= NETIF_F_LOOPBACK |
3216 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3217
3218 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3219 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
3220 NETIF_F_HW_VLAN_STAG_FILTER;
3221 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
3222 }
3223
3224 if (mlx4_is_slave(mdev->dev)) {
3225 int phv;
3226
3227 err = get_phv_bit(mdev->dev, port, &phv);
3228 if (!err && phv) {
3229 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3230 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
3231 }
3232 } else {
3233 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3234 !(mdev->dev->caps.flags2 &
3235 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
3236 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3237 }
3238
3239 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3240 dev->hw_features |= NETIF_F_RXFCS;
3241
3242 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3243 dev->hw_features |= NETIF_F_RXALL;
3244
3245 if (mdev->dev->caps.steering_mode ==
3246 MLX4_STEERING_MODE_DEVICE_MANAGED &&
3247 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
3248 dev->hw_features |= NETIF_F_NTUPLE;
3249
3250 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3251 dev->priv_flags |= IFF_UNICAST_FLT;
3252
3253 /* Setting a default hash function value */
3254 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3255 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3256 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3257 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3258 } else {
3259 en_warn(priv,
3260 "No RSS hash capabilities exposed, using Toeplitz\n");
3261 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3262 }
3263
3264 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3265 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3266 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3267 NETIF_F_GSO_PARTIAL;
3268 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3269 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3270 NETIF_F_GSO_PARTIAL;
3271 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
3272 }
3273
3274 mdev->pndev[port] = dev;
3275 mdev->upper[port] = NULL;
3276
3277 netif_carrier_off(dev);
3278 mlx4_en_set_default_moderation(priv);
3279
3280 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
3281 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3282
3283 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3284
3285 /* Configure port */
3286 mlx4_en_calc_rx_buf(dev);
3287 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
3288 priv->rx_skb_size + ETH_FCS_LEN,
3289 prof->tx_pause, prof->tx_ppp,
3290 prof->rx_pause, prof->rx_ppp);
3291 if (err) {
3292 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3293 priv->port, err);
3294 goto out;
3295 }
3296
3297 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3298 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
3299 if (err) {
3300 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3301 err);
3302 goto out;
3303 }
3304 }
3305
3306 /* Init port */
3307 en_warn(priv, "Initializing port\n");
3308 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3309 if (err) {
3310 en_err(priv, "Failed Initializing port\n");
3311 goto out;
3312 }
3313 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
3314
3315 /* Initialize time stamp mechanism */
3316 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
3317 mlx4_en_init_timestamp(mdev);
3318
3319 queue_delayed_work(mdev->workqueue, &priv->service_task,
3320 SERVICE_TASK_DELAY);
3321
3322 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3323 mdev->profile.prof[priv->port].rx_ppp,
3324 mdev->profile.prof[priv->port].rx_pause,
3325 mdev->profile.prof[priv->port].tx_ppp,
3326 mdev->profile.prof[priv->port].tx_pause);
3327
3328 err = register_netdev(dev);
3329 if (err) {
3330 en_err(priv, "Netdev registration failed for port %d\n", port);
3331 goto out;
3332 }
3333
3334 priv->registered = 1;
3335 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
3336 dev);
3337
3338 return 0;
3339
3340 out:
3341 mlx4_en_destroy_netdev(dev);
3342 return err;
3343 }
3344
3345 int mlx4_en_reset_config(struct net_device *dev,
3346 struct hwtstamp_config ts_config,
3347 netdev_features_t features)
3348 {
3349 struct mlx4_en_priv *priv = netdev_priv(dev);
3350 struct mlx4_en_dev *mdev = priv->mdev;
3351 struct mlx4_en_port_profile new_prof;
3352 struct mlx4_en_priv *tmp;
3353 int port_up = 0;
3354 int err = 0;
3355
3356 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3357 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
3358 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3359 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
3360 return 0; /* Nothing to change */
3361
3362 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3363 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3364 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3365 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3366 return -EINVAL;
3367 }
3368
3369 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3370 if (!tmp)
3371 return -ENOMEM;
3372
3373 mutex_lock(&mdev->state_lock);
3374
3375 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
3376 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
3377
3378 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
3379 if (err)
3380 goto out;
3381
3382 if (priv->port_up) {
3383 port_up = 1;
3384 mlx4_en_stop_port(dev, 1);
3385 }
3386
3387 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
3388 ts_config.rx_filter,
3389 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3390
3391 mlx4_en_safe_replace_resources(priv, tmp);
3392
3393 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3394 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3395 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3396 else
3397 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3398 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3399 /* RX time-stamping is OFF, update the RX vlan offload
3400 * to the latest wanted state
3401 */
3402 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3403 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3404 else
3405 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3406 }
3407
3408 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3409 if (features & NETIF_F_RXFCS)
3410 dev->features |= NETIF_F_RXFCS;
3411 else
3412 dev->features &= ~NETIF_F_RXFCS;
3413 }
3414
3415 	/* RX vlan offload and RX time-stamping can't coexist!
3416 	 * Regardless of the caller's choice, turn off RX vlan offload
3417 	 * when RX time-stamping is ON.
3418 	 */
3419 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3420 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3421 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3422 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3423 }
3424
3425 if (port_up) {
3426 err = mlx4_en_start_port(dev);
3427 if (err)
3428 en_err(priv, "Failed starting port\n");
3429 }
3430
3431 out:
3432 mutex_unlock(&mdev->state_lock);
3433 kfree(tmp);
3434 if (!err)
3435 netdev_features_change(dev);
3436 return err;
3437 }