/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))

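/*
 * Usage sketch for the sizing/addressing macros above (illustrative
 * only; the query_hca_cap_out layout and its "capability" field are
 * assumed from linux/mlx5/mlx5_ifc.h):
 *
 *	u32 out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
 *	void *caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 *
 * MLX5_ST_SZ_DW() sizes the buffer in 32-bit words straight from the
 * mlx5_ifc bit layout, and MLX5_ADDR_OF() yields a byte pointer to a
 * named field within it.
 */
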
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})

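/*
 * Round-trip sketch for the setters/getters above (illustrative only;
 * the create_cq_in/cqc layouts and the "log_cq_size" field are
 * assumed from linux/mlx5/mlx5_ifc.h):
 *
 *	u32 in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
 *	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *
 *	MLX5_SET(cqc, cqc, log_cq_size, 6);
 *	WARN_ON(MLX5_GET(cqc, cqc, log_cq_size) != 6);
 *
 * Both macros read-modify-write a whole big-endian dword, so the
 * buffer stays in device byte order at all times.
 */
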
/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
		type_t tmp;						  \
		switch (sizeof(tmp)) {					  \
		case sizeof(u8):					  \
			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
			break;						  \
		case sizeof(u16):					  \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u32):					  \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u64):					  \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break;						  \
		}							  \
		tmp;							  \
		})

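/*
 * MLX5_GET_BE() returns the field still in big-endian form, sized by
 * the requested type, e.g. (the fte_match_set_lyr_2_4 layout and its
 * "ethertype" field are assumed from mlx5_ifc.h):
 *
 *	__be16 et = MLX5_GET_BE(__be16, fte_match_set_lyr_2_4,
 *				match_c, ethertype);
 */
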
enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
	MLX5_MAX_PSVS			= 4,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET		= 5,
};

enum {
	MLX5_INLINE_SEG			= 0x80000000,
};

enum {
	MLX5_HW_START_PADDING		= MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE	= 128,
	MLX5_MAX_LOG_PKEY_TABLE		= 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS	= 1 << 31
};

enum {
	MLX5_PFAULT_SUBTYPE_WQE		= 0,
	MLX5_PFAULT_SUBTYPE_RDMA	= 1,
};

enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

enum {
	MLX5_BF_REGS_PER_PAGE		= 4,
	MLX5_MAX_UAR_PAGES		= 1 << 8,
	MLX5_NON_FP_BF_REGS_PER_PAGE	= 2,
	MLX5_MAX_UUARS	= MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
};

enum {
	MLX5_MKEY_MASK_LEN		= 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
	MLX5_MKEY_MASK_PD		= 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
	MLX5_MKEY_MASK_KEY		= 1ull << 13,
	MLX5_MKEY_MASK_QPN		= 1ull << 14,
	MLX5_MKEY_MASK_LR		= 1ull << 17,
	MLX5_MKEY_MASK_LW		= 1ull << 18,
	MLX5_MKEY_MASK_RR		= 1ull << 19,
	MLX5_MKEY_MASK_RW		= 1ull << 20,
	MLX5_MKEY_MASK_A		= 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
	MLX5_MKEY_MASK_FREE		= 1ull << 29,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
	MLX5_UMR_CHECK_FREE		= (2 << 5),

	MLX5_UMR_INLINE			= (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
};

enum {
	MLX5_EVENT_TYPE_COMP		   = 0x0,

	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,

	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,

	MLX5_EVENT_TYPE_CMD		   = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
};

enum {
	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
};

enum {
	MLX5_ROCE_VERSION_1	= 0,
	MLX5_ROCE_VERSION_2	= 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP	= 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP	= 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4	= 0,
	MLX5_ROCE_L3_TYPE_IPV6	= 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
};

enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_LSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_BIND_MW		= 0x18,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX5_RECV_OPCODE_SEND		= 0x01,
	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX5_CQE_OPCODE_ERROR		= 0x1e,
	MLX5_CQE_OPCODE_RESIZE		= 0x16,

	MLX5_OPCODE_SET_PSV		= 0x20,
	MLX5_OPCODE_GET_PSV		= 0x21,
	MLX5_OPCODE_CHECK_PSV		= 0x22,
	MLX5_OPCODE_RGET_PSV		= 0x26,
	MLX5_OPCODE_RCHECK_PSV		= 0x27,

	MLX5_OPCODE_UMR			= 0x25,
};

enum {
	MLX5_SET_PORT_RESET_QKEY	= 0,
	MLX5_SET_PORT_GUID0		= 16,
	MLX5_SET_PORT_NODE_GUID		= 17,
	MLX5_SET_PORT_SYS_GUID		= 18,
	MLX5_SET_PORT_GID_TABLE		= 19,
	MLX5_SET_PORT_PKEY_TABLE	= 20,
};

enum {
	MLX5_BW_NO_LIMIT   = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT	   = 4,
};

enum {
	MLX5_MAX_PAGE_SHIFT		= 31
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT		= 12,
	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
};

enum {
	/*
	 * Max wqe size for rdma read is 512 bytes, so this
	 * limits our max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
};

enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32			rc_odp_caps;
		__be32			uc_odp_caps;
		__be32			ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};

struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;
	u8		sig;
	u8		rsvd1;
	u8		status_own;
};

struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rsvd2;
	u8		irisc_index;
	u8		synd;
	__be16		ext_synd;
};

struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[120];
	__be32			initializing;
	struct health_buffer	health;
	__be32			rsvd2[880];
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;
	__be32			rsvd4[1019];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};

struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

struct mlx5_eqe_qp_srq {
	__be32	reserved1[6];
	__be32	type_qpn_srqn;
};

struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

struct mlx5_eqe_page_req {
	u8		rsvd0[2];
	__be16		func_id;
	__be32		num_pages;
	__be32		rsvd1[5];
};

struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	union {
		struct {
			u16     reserved1;
			__be16  wqe_index;
			u16	reserved2;
			__be16  packet_length;
			u8	reserved3[12];
		} __packed wqe;
		struct {
			__be32  r_key;
			u16	reserved1;
			__be16  packet_length;
			__be32  rdma_op_len;
			__be64  rdma_va;
		} __packed rdma;
	} __packed;
	__be32 flags_qpn;
} __packed;

struct mlx5_eqe_vport_change {
	u8		rsvd0[2];
	__be16		vport_num;
	__be32		rsvd1[6];
} __packed;

union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
	struct mlx5_eqe_page_fault	page_fault;
	struct mlx5_eqe_vport_change	vport_change;
} __packed;

struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

struct mlx5_cqe64 {
	u8		outer_l3_tunneled;
	u8		rsvd0;
	__be16		wqe_id;
	u8		lro_tcppsh_abort_dupack;
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_l3_hdr_type;
	__be16		vlan_info;
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	__be32		imm_inval_pkey;
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be32		timestamp_h;
	__be32		timestamp_l;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};

struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		struct {
			__be16 checksum;
			__be16 rsvd;
		};
		struct {
			__be16 wqe_counter;
			u8  s_wqe_opcode;
			u8  reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};

enum {
	MLX5_CQE_FORMAT_CSUM = 0x1,
};

#define MLX5_MINI_CQE_ARRAY_SIZE 8

static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}

static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->outer_l3_tunneled & 0x1;
}

static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return !!(cqe->l4_l3_hdr_type & 0x1);
}

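/*
 * Putting the CQE helpers together, a receive path might classify a
 * completion like this (illustrative sketch only; ownership and
 * validity checks are omitted):
 *
 *	u8 l4 = get_cqe_l4_hdr_type(cqe);
 *	bool is_tcp = l4 == CQE_L4_HDR_TYPE_TCP_NO_ACK ||
 *		      l4 == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA ||
 *		      l4 == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA;
 *
 *	if (cqe_is_tunneled(cqe))
 *		... checksum applies to the inner packet ...
 *	if (cqe_has_vlan(cqe))
 *		... extract the tag from cqe->vlan_info ...
 */
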
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

struct mpwrq_cqe_bc {
	__be16	filler_consumed_strides;
	__be16	byte_cnt;
};

static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->wqe_counter);
}

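/*
 * For multi-packet (striding) RQs the CQE byte_cnt dword is overlaid
 * with struct mpwrq_cqe_bc, as the casts above show. An illustrative
 * consumer:
 *
 *	u16 strides = mpwrq_get_cqe_consumed_strides(cqe);
 *
 *	if (mpwrq_is_filler_cqe(cqe))
 *		... only advance the WQE by "strides"; no packet data ...
 *	else
 *		... mpwrq_get_cqe_byte_cnt(cqe) bytes of packet start at
 *		    stride mpwrq_get_cqe_stride_index(cqe) ...
 */
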
enum {
	CQE_L4_HDR_TYPE_NONE			= 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
	CQE_L4_HDR_TYPE_UDP			= 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
};

enum {
	CQE_RSS_HTYPE_IP	= 0x3 << 6,
	CQE_RSS_HTYPE_L4	= 0x3 << 2,
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
};

struct mlx5_sig_err_cqe {
	u8		rsvd0[16];
	__be32		expected_trans_sig;
	__be32		actual_trans_sig;
	__be32		expected_reftag;
	__be32		actual_reftag;
	__be16		syndrome;
	u8		rsvd22[2];
	__be32		mkey;
	__be64		err_offset;
	u8		rsvd30[8];
	u8		signature;
	u8		op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8			rsvd0[2];
	__be16			next_wqe_index;
	u8			signature;
	u8			rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE	= 1 << 6,
};

enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR	= 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
	MLX5_MKEY_LEN64		= 1 << 31,
};

struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do
	 * not have translation
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;
	u8		rsvd1[4];
	__be32		flags_pd;
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};

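/*
 * The free bit described above is what MLX5_MKEY_STATUS_FREE tests,
 * e.g. (illustrative check on a mailbox copy of the segment):
 *
 *	if (seg->status & MLX5_MKEY_STATUS_FREE)
 *		... the MKey is in the free state ...
 */
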
#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
};

enum {
	VPORT_STATE_DOWN		= 0x0,
	VPORT_STATE_UP			= 0x1,
};

enum {
	MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
	MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
	MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4		= 0,
	MLX5_L3_PROT_TYPE_IPV6		= 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP		= 0,
	MLX5_L4_PROT_TYPE_UDP		= 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
};

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
};

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
};

enum {
	MLX5_WOL_DISABLE	= 0,
	MLX5_WOL_SECURED_MAGIC	= 1 << 1,
	MLX5_WOL_MAGIC		= 1 << 2,
	MLX5_WOL_ARP		= 1 << 3,
	MLX5_WOL_BROADCAST	= 1 << 4,
	MLX5_WOL_MULTICAST	= 1 << 5,
	MLX5_WOL_UNICAST	= 1 << 6,
	MLX5_WOL_PHY_ACTIVITY	= 1 << 7,
};

enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX	= 0,
	HCA_CAP_OPMOD_GET_CUR	= 1,
};

enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_EOIB_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_RESERVED,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)

#define MLX5_CAP_QOS(mdev, cap)\
	MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)

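/*
 * Illustrative capability check (assumes "mdev" is a struct
 * mlx5_core_dev with hca_caps_cur already queried, and that
 * "cqe_compression" is one example bit from the cmd_hca_cap layout
 * in mlx5_ifc.h):
 *
 *	if (MLX5_CAP_GEN(mdev, cqe_compression))
 *		... compressed CQEs may be enabled ...
 */
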
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}

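/*
 * The device reports the pkey table size as a log2 increment over the
 * 128-entry minimum, so mlx5_to_sw_pkey_sz(2) == 128 << 2 == 512,
 * while anything above MLX5_MAX_LOG_PKEY_TABLE (5) is rejected as 0.
 */
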
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */