/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/rhashtable.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
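/* Illustrative usage (a sketch, not code from this file): MLX5_SET_CFG()
 * simply pins the first MLX5_SET() argument to the create_flow_group_in
 * layout, so flow-group setup code reads:
 *
 *	MLX5_SET_CFG(in, start_flow_index, ix);
 *	MLX5_SET_CFG(in, end_flow_index, ix + group_size - 1);
 */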
#define MLX5E_MAX_NUM_TC	8

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x4
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6

#define MLX5_MPWRQ_LOG_STRIDE_SIZE		6  /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS	8  /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_WQE_SZ			17
#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE		(MLX5_MPWRQ_NUM_STRIDES >> \
						 MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
				   BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
#define MLX5_UMR_ALIGN				(2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
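/* Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):
 *	MLX5_MPWRQ_WQE_PAGE_ORDER = 17 - 12 = 5
 *	MLX5_MPWRQ_PAGES_PER_WQE  = BIT(5) = 32 pages (128 KiB) per WQE
 * With the default 64 B strides (log 6), one multi-packet WQE holds
 * 2^(17 - 6) = 2048 strides, i.e. 2048 >> 5 = 64 strides per page.
 */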
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2

#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
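/* Worked example: MLX5E_INDIR_RQT_SIZE = BIT(0x7) = 128 indirection entries,
 * so MLX5E_MAX_NUM_CHANNELS = 128 >> 1 = 64 channels, and with up to
 * MLX5E_MAX_NUM_TC = 8 TCs per channel, MLX5E_MAX_NUM_SQS = 64 * 8 = 512.
 */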
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
#define MLX5E_SQ_BF_BUDGET             16

#define MLX5E_NUM_MAIN_GROUPS 9
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}
static inline int mlx5_min_log_rq_size(int wq_type)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
	default:
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
	}
}

static inline int mlx5_max_log_rq_size(int wq_type)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
	default:
		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
	}
}
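/* A minimal sketch (hypothetical helper, not part of the driver): the two
 * bound helpers above are meant to clamp a requested log ring size into the
 * valid range for the chosen WQ type, e.g. when handling ethtool -G.
 */
static inline u8 mlx5e_clamp_log_rq_size(int wq_type, u8 req_log_size)
{
	return clamp_t(u8, req_log_size,
		       mlx5_min_log_rq_size(wq_type),
		       mlx5_max_log_rq_size(wq_type));
}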
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
};

struct mlx5e_rx_wqe {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data;
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_wqe_data_seg       data;
};

static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
	"rx_cqe_moder",
};

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
};
#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable)    \
	do {                                        \
		if (enable)                         \
			priv->pflags |= pflag;      \
		else                                \
			priv->pflags &= ~pflag;     \
	} while (0)
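/* Illustrative usage (a sketch): flip a private flag and test it with a
 * plain bitmask check.
 *
 *	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, true);
 *	if (priv->pflags & MLX5E_PFLAG_RX_CQE_BASED_MODER)
 *		... choose a CQE-based moderation profile ...
 */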
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
#endif

struct mlx5e_cq_moder {
	u16 usec;
	u16 pkts;
};
struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;
	u8  mpwqe_log_stride_sz;
	u8  mpwqe_log_num_strides;
	u8  log_rq_size;
	u16 num_channels;
	u8  num_tc;
	u8  rx_cq_period_mode;
	bool rx_cqe_compress_admin;
	bool rx_cqe_compress;
	struct mlx5e_cq_moder rx_cq_moderation;
	struct mlx5e_cq_moder tx_cq_moderation;
	u16 min_rx_wqes;
	bool lro_en;
	u32 lro_wqe_sz;
	u16 tx_max_inline;
	u8  rss_hfunc;
	u8  toeplitz_hash_key[40];
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	bool vlan_strip_disable;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct ieee_ets ets;
#endif
	bool rx_am_enabled;
};
struct mlx5e_tstamp {
	rwlock_t                   lock;
	struct cyclecounter        cycles;
	struct timecounter         clock;
	struct hwtstamp_config     hwtstamp_config;
	u32                        nominal_c_mult;
	unsigned long              overflow_period;
	struct delayed_work        overflow_work;
	struct mlx5_core_dev      *mdev;
	struct ptp_clock          *ptp;
	struct ptp_clock_info      ptp_info;
};
enum {
	MLX5E_RQ_STATE_POST_WQES_ENABLE,
	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
};
struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_channel      *channel;
	struct mlx5e_priv         *priv;

	/* cqe decompression */
	struct mlx5_cqe64          title;
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        decmprs_left;
	u16                        decmprs_wqe_counter;

	/* control */
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
				       struct mlx5_cqe64 *cqe);
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
				  u16 ix);
struct mlx5e_dma_info {
	struct page	*page;
	dma_addr_t	addr;
};

struct mlx5e_rx_am_stats {
	int ppms; /* packets per msec */
	int epms; /* events per msec */
};
struct mlx5e_rx_am_sample {
	ktime_t		time;
	unsigned int	pkt_ctr;
	u16		event_ctr;
};

struct mlx5e_rx_am { /* Adaptive Moderation */
	u8				state;
	struct mlx5e_rx_am_stats	prev_stats;
	struct mlx5e_rx_am_sample	start_sample;
	struct work_struct		work;
	u8				profile_ix;
	u8				mode;
	u8				tune_state;
	u8				steps_right;
	u8				steps_left;
	u8				tired;
};
struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll      wq;
	u32                    wqe_sz;
	struct sk_buff       **skb;
	struct mlx5e_mpw_info *wqe_info;
	__be32                 mkey_be;
	__be32                 umr_mkey_be;

	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_tstamp   *tstamp;
	struct mlx5e_rq_stats  stats;
	struct mlx5e_cq        cq;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_alloc_wqe     alloc_wqe;

	unsigned long          state;
	int                    ix;

	struct mlx5e_rx_am     am; /* Adaptive Moderation */

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	u8                     wq_type;
	u32                    mpwqe_stride_sz;
	u32                    mpwqe_num_strides;
	u32                    rqn;
	struct mlx5e_channel  *channel;
	struct mlx5e_priv     *priv;
} ____cacheline_aligned_in_smp;
struct mlx5e_umr_dma_info {
	__be64                *mtt;
	__be64                *mtt_no_align;
	dma_addr_t             mtt_addr;
	struct mlx5e_dma_info *dma_info;
};
struct mlx5e_mpw_info {
	union {
		struct mlx5e_dma_info     dma_info;
		struct mlx5e_umr_dma_info umr;
	};
	u16 consumed_strides;
	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];

	void (*dma_pre_sync)(struct device *pdev,
			     struct mlx5e_mpw_info *wi,
			     u32 wqe_offset, u32 len);
	void (*add_skb_frag)(struct mlx5e_rq *rq,
			     struct sk_buff *skb,
			     struct mlx5e_mpw_info *wi,
			     u32 page_idx, u32 frag_offset, u32 len);
	void (*copy_skb_header)(struct device *pdev,
				struct sk_buff *skb,
				struct mlx5e_mpw_info *wi,
				u32 page_idx, u32 offset,
				u32 headlen);
	void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
};
struct mlx5e_tx_wqe_info {
	u32 num_bytes;
	u8  num_wqebbs;
	u8  num_dma;
};

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
	MLX5E_SQ_STATE_BF_ENABLE,
};

struct mlx5e_ico_wqe_info {
	u8  opcode;
	u8  num_wqebbs;
};
struct mlx5e_sq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u32                        dma_fifo_cc;

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u32                        dma_fifo_pc;
	u16                        bf_offset;
	u16                        prev_cc;
	u8                         bf_budget;
	struct mlx5e_sq_stats      stats;

	struct mlx5e_cq            cq;

	/* pointers to per packet info: write@xmit, read@completion */
	struct sk_buff           **skb;
	struct mlx5e_sq_dma       *dma_fifo;
	struct mlx5e_tx_wqe_info  *wqe_info;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u16                        bf_buf_size;
	u16                        max_inline;
	u16                        edge;
	struct device             *pdev;
	struct mlx5e_tstamp       *tstamp;
	__be32                     mkey_be;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5_uar            uar;
	struct mlx5e_channel      *channel;
	int                        tc;
	struct mlx5e_ico_wqe_info *ico_wqe_info;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
{
	return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
		(sq->cc == sq->pc));
}
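/* Worked example of the ring math above: wq.sz_m1 is size - 1 (sizes are
 * powers of two) and cc/pc are free-running consumer/producer counters.
 * With size = 64, pc = 70, cc = 10: occupancy is pc - cc = 60, and
 * (sz_m1 & (cc - pc)) = 63 & (u16)-60 = 4 free WQEBBs, so the check holds
 * only for n <= 4.  The (cc == pc) clause covers the completely empty ring,
 * where the masked difference would be 0.
 */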
enum {
	MLX5E_CHANNEL_NAPI_SCHED = 1,
};

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_sq            icosq;   /* internal control operations */
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;
	unsigned long              flags;

	/* control */
	struct mlx5e_priv         *priv;
	int                        ix;
	int                        cpu;
};
enum mlx5e_traffic_types {
	MLX5E_TT_IPV4_TCP,
	MLX5E_TT_IPV6_TCP,
	MLX5E_TT_IPV4_UDP,
	MLX5E_TT_IPV6_UDP,
	MLX5E_TT_IPV4_IPSEC_AH,
	MLX5E_TT_IPV6_IPSEC_AH,
	MLX5E_TT_IPV4_IPSEC_ESP,
	MLX5E_TT_IPV6_IPSEC_ESP,
	MLX5E_TT_IPV4,
	MLX5E_TT_IPV6,
	MLX5E_TT_ANY,
	MLX5E_NUM_TT,
	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};

enum {
	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
};
struct mlx5e_vxlan_db {
	spinlock_t              lock; /* protect vxlan table */
	struct radix_tree_root  tree;
};

struct mlx5e_l2_rule {
	u8  addr[ETH_ALEN + 2];
	struct mlx5_flow_rule *rule;
};
struct mlx5e_flow_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
};

#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
struct mlx5e_tc_table {
	struct mlx5_flow_table          *t;

	struct rhashtable_params        ht_params;
	struct rhashtable               ht;
};

struct mlx5e_vlan_table {
	struct mlx5e_flow_table         ft;
	unsigned long                   active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlx5_flow_rule           *active_vlans_rule[VLAN_N_VID];
	struct mlx5_flow_rule           *untagged_rule;
	struct mlx5_flow_rule           *any_vlan_rule;
	bool                            filter_disabled;
};

struct mlx5e_l2_table {
	struct mlx5e_flow_table    ft;
	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
	struct mlx5e_l2_rule       broadcast;
	struct mlx5e_l2_rule       allmulti;
	struct mlx5e_l2_rule       promisc;
	bool                       broadcast_enabled;
	bool                       allmulti_enabled;
	bool                       promisc_enabled;
};

/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_rule    *rules[MLX5E_NUM_TT];
};
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_rule    *default_rule;
	struct hlist_head        rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t                     arfs_lock;
	struct list_head               rules;
	int                            last_filter_id;
	struct workqueue_struct        *wq;
};
enum {
	MLX5E_VLAN_FT_LEVEL = 0,
	MLX5E_L2_FT_LEVEL,
	MLX5E_TTC_FT_LEVEL,
	MLX5E_ARFS_FT_LEVEL
};
struct mlx5e_flow_steering {
	struct mlx5_flow_namespace      *ns;
	struct mlx5e_tc_table           tc;
	struct mlx5e_vlan_table         vlan;
	struct mlx5e_l2_table           l2;
	struct mlx5e_ttc_table          ttc;
	struct mlx5e_arfs_tables        arfs;
};

struct mlx5e_direct_tir {
	u32              tirn;
	u32              rqtn;
};
struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_sq            **txq_to_sq_map;
	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	/* priv data path fields - end */

	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5_uar            cq_uar;
	u32                        pdn;
	u32                        tdn;
	struct mlx5_core_mkey      mkey;
	struct mlx5_core_mkey      umr_mkey;
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channel     **channel;
	u32                        tisn[MLX5E_MAX_NUM_TC];
	u32                        indir_rqtn;
	u32                        indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_direct_tir    direct_tir[MLX5E_MAX_NUM_CHANNELS];
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;
	struct mlx5e_vxlan_db      vxlan;

	struct mlx5e_params        params;
	struct workqueue_struct   *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct delayed_work        update_stats_work;

	u32                        pflags;
	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct mlx5e_tstamp        tstamp;
	u16                        q_counter;
};
enum mlx5e_link_mode {
	MLX5E_1000BASE_CX_SGMII	 = 0,
	MLX5E_1000BASE_KX	 = 1,
	MLX5E_10GBASE_CX4	 = 2,
	MLX5E_10GBASE_KX4	 = 3,
	MLX5E_10GBASE_KR	 = 4,
	MLX5E_20GBASE_KR2	 = 5,
	MLX5E_40GBASE_CR4	 = 6,
	MLX5E_40GBASE_KR4	 = 7,
	MLX5E_56GBASE_R4	 = 8,
	MLX5E_10GBASE_CR	 = 12,
	MLX5E_10GBASE_SR	 = 13,
	MLX5E_10GBASE_ER	 = 14,
	MLX5E_40GBASE_SR4	 = 15,
	MLX5E_40GBASE_LR4	 = 16,
	MLX5E_50GBASE_SR2	 = 18,
	MLX5E_100GBASE_CR4	 = 20,
	MLX5E_100GBASE_SR4	 = 21,
	MLX5E_100GBASE_KR4	 = 22,
	MLX5E_100GBASE_LR4	 = 23,
	MLX5E_100BASE_TX	 = 24,
	MLX5E_1000BASE_T	 = 25,
	MLX5E_10GBASE_T		 = 26,
	MLX5E_25GBASE_CR	 = 27,
	MLX5E_25GBASE_KR	 = 28,
	MLX5E_25GBASE_SR	 = 29,
	MLX5E_50GBASE_CR2	 = 30,
	MLX5E_50GBASE_KR2	 = 31,
	MLX5E_LINK_MODES_NUMBER,
};

#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
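/* Illustrative usage (a sketch): the PTYS eth_proto_* fields are bitmasks
 * over enum mlx5e_link_mode, so a capability test reads:
 *
 *	if (eth_proto_cap & MLX5E_PROT_MASK(MLX5E_25GBASE_CR))
 *		... 25GBASE-CR may be advertised ...
 */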
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
				    struct mlx5_cqe64 *cqe,
				    u16 byte_cnt,
				    struct mlx5e_mpw_info *wi,
				    struct sk_buff *skb);
void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
					struct mlx5_cqe64 *cqe,
					u16 byte_cnt,
					struct mlx5e_mpw_info *wi,
					struct sk_buff *skb);
void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
				struct mlx5e_mpw_info *wi);
void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
				    struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);

void mlx5e_rx_am(struct mlx5e_rq *rq);
void mlx5e_rx_am_work(struct work_struct *work);
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);

void mlx5e_update_stats(struct mlx5e_priv *priv);

int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
void mlx5e_set_rx_mode_work(struct work_struct *work);

void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
			struct skb_shared_hwtstamps *hwts);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);

int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*sq->wq.db = cpu_to_be32(sq->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	if (bf_sz)
		__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
	else
		mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
	/* flush the write-combining mapped buffer */
	wmb();

	sq->bf_offset ^= sq->bf_buf_size;
}
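/* Note on the final XOR: the BlueFlame register is double buffered, and
 * toggling sq->bf_offset between the two halves (bf_buf_size bytes each)
 * lets back-to-back doorbells land in alternating write-combining buffers.
 */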
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return min_t(int, mdev->priv.eq_table.num_comp_vectors,
		     MLX5E_MAX_NUM_CHANNELS);
}

static inline int mlx5e_get_mtt_octw(int npages)
{
	return ALIGN(npages, 8) / 2;
}
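/* Worked example: an MTT entry is 8 bytes and an octword is 16 bytes, so two
 * entries fit per octword; npages is first aligned up to 8, matching the
 * ALIGN(..., 8) used in MLX5_CHANNEL_MAX_NUM_MTTS above.
 * E.g. npages = 13 -> ALIGN(13, 8) = 16 -> 16 / 2 = 8 octwords.
 */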
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
#endif
#ifndef CONFIG_RFS_ACCEL
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	return 0;
}

static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}

static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	return -ENOTSUPP;
}

static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	return -ENOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id);
#endif
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);

#endif /* __MLX5_EN_H__ */