/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
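/*
 * Command mailboxes are laid out big-endian by the firmware, so
 * MLX4_GET()/MLX4_PUT() dispatch on the operand size to pick the
 * matching be16/be32/be64 conversion; any other operand size falls
 * through to the undefined __buggy_use_of_MLX4_*() externs above and
 * is therefore caught at link time.  Illustrative use (not a line
 * from this file): for a u16 destination, MLX4_GET(val, outbox, 0x06)
 * expands to a be16_to_cpup() read at outbox + 0x06.
 */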
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[37] = "Wake On LAN (port1) support",
		[38] = "Wake On LAN (port2) support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
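/*
 * DEV_CAP flags form a 64-bit bitmap: a capability is present when
 * its bit is set, and only bits named in fname[] are printed.  The
 * same decode pattern is reused for the flags2 extension word below.
 */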
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support",
		[16] = "CONFIG DEV support",
		[17] = "Asymmetric EQs support",
		[18] = "More than 80 VFs support",
		[19] = "Performance optimized for limited rule configuration flow steering support",
		[20] = "Recoverable error events support",
		[21] = "Port Remap support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags2:");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
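/*
 * mlx4_MOD_STAT_CFG() above shows the mailbox life cycle that nearly
 * every command in this file follows: mlx4_alloc_cmd_mailbox() returns
 * a DMA-coherent buffer (or an ERR_PTR that is unwrapped with
 * PTR_ERR()), fields are packed with MLX4_PUT(), the command is posted
 * with mlx4_cmd()/mlx4_cmd_box(), and mlx4_free_cmd_mailbox() runs on
 * every exit path.
 */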
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u8 *outbox;
	u8 field;
	u16 field16;
	int err;
	u32 in_modifier = slave;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field, port;
	u32 size, proxy_qp, qkey;
	int err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	(1 << 31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
	} else
		err = -EINVAL;

	return err;
}
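/*
 * For the per-port query above (op_modifier == 1), the proxy and
 * tunnel special QP numbers handed to a slave are derived from
 * per-function bases at a stride of eight QPs per slave, one QP0/QP1
 * pair per port.  Worked example with hypothetical values:
 * base_proxy_sqpn == 0x100, slave == 2, port == 1 yields
 * qp0_proxy = 0x100 + 8 * 2 + 1 - 1 = 0x110 and qp1_proxy = 0x112.
 */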
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;
	u32 in_modifier;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	in_modifier = op_modifier ? gen_or_port :
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;

		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
			func_cap->reserved_lkey = size;
		} else {
			func_cap->reserved_lkey = 0;
		}

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE		       0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1 << 6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1 << 3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
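/*
 * Most capabilities above are reported by the firmware as log2 values
 * packed into a few bits, hence the long run of "1 << (field & mask)"
 * decodes.  Worked example with a hypothetical mailbox byte: if the
 * byte at QUERY_DEV_CAP_MAX_QP_OFFSET reads 0x97, then
 * max_qps = 1 << (0x97 & 0x1f) = 1 << 23.
 */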
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
		 dev_cap->port_cap[1].max_port_width);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
		 dev_cap->dmfs_high_rate_qpn_base);
	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
		 dev_cap->dmfs_high_rate_qpn_range);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);
}
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu	   = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu	   = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids	   = 1 << (field >> 4);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl	   = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs  = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL	(1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64 flags;
	int err = 0;
	u8 field;
	u32 bmme_flags, field32;
	int real_port;
	int slave_port;
	int first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 and port remap */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	/* turn off QCN for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	field &= 0xfe;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);

	return 0;
}
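/*
 * The wrapper above rewrites the QUERY_DEV_CAP answer in place before
 * a guest sees it: WoL bits are remapped from physical to slave port
 * numbering, and features a VF must not drive (timestamps, VXLAN,
 * BlueFlame, MW type 2, port remap, flow steering, VST/FSM/QCN) are
 * masked off.
 */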
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}
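/*
 * mlx4_QUERY_PORT_wrapper() is the same idea applied to QUERY_PORT:
 * the PF substitutes the administered per-VF MAC, forbids link
 * sensing, reports the admin link state, and clamps the GID and P_Key
 * table sizes, so an untrusted guest never observes the physical port
 * state directly.
 */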
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B,
					       MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
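/*
 * The chunk-size computation in mlx4_map_cmd() relies on power-of-two
 * alignment: ffs(addr | size) - 1 is the largest lg such that both the
 * address and the size are multiples of 1 << lg.  Worked example with
 * hypothetical values: addr = 0x24000, size = 0x2000 gives
 * ffs(0x26000) - 1 = 13, so the chunk is passed as 8 KB pages.
 */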
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
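/*
 * Worked example of the fw_ver rearrangement above, with a
 * hypothetical raw value: 0x0002000001f5000e (major 2, subminor
 * 0x1f5 = 501, minor 0xe = 14 as stored) becomes 0x00020000000e01f5,
 * which then prints as 2.14.501 in the %d.%d.%03d messages.
 */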
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}
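/*
 * Worked example of the swab above, with hypothetical VSD bytes: a
 * board ID word stored as 'M','T','_','0' comes back from the
 * firmware as '0','_','T','M', and swab32() restores the printable
 * byte order before the ID is used as a string.
 */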
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;
	static const u8 a0_dmfs_hw_steering[] =  {
		[MLX4_STEERING_DMFS_A0_DEFAULT]		= 0,
		[MLX4_STEERING_DMFS_A0_DYNAMIC]		= 1,
		[MLX4_STEERING_DMFS_A0_STATIC]		= 2,
		[MLX4_STEERING_DMFS_A0_DISABLE]		= 3
	};

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define	 INIT_HCA_VERSION		 2
#define INIT_HCA_VXLAN_OFFSET		 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
#define INIT_HCA_QPC_OFFSET		 0x020
#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define	 INIT_HCA_EQE_CQE_STRIDE_OFFSET  (INIT_HCA_QPC_OFFSET + 0x3b)
#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define	 INIT_HCA_NUM_SYS_EQS_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x6a)
#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define  INIT_HCA_FS_PARAM_OFFSET	 0x1d0
#define  INIT_HCA_FS_BASE_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define  INIT_HCA_FS_A0_OFFSET		 (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define  INIT_HCA_FS_ETH_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define  INIT_HCA_FS_IB_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define  INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		(ilog2(cache_line_size()) - 4) << 5;
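
	/*
	 * Worked example for the encoding above: with a 64-byte cache line,
	 * ilog2(64) = 6, so the byte becomes (6 - 4) << 5 = 0x40, i.e. the
	 * value 2 ("64 bytes") placed in bits 7:5.
	 */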
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
		dev->caps.eqe_size = cache_line_size();
		dev->caps.cqe_size = cache_line_size();
		dev->caps.eqe_factor = 0;
		MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
				      (ilog2(dev->caps.eqe_size) - 5)),
			 INIT_HCA_EQE_CQE_STRIDE_OFFSET);

		/* User still need to know to support CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}
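
	/*
	 * Worked example for the stride encoding above: with a 128-byte
	 * cache line, ilog2(128) - 5 = 2, so MLX4_PUT writes
	 * (2 << 4) | 2 = 0x22 -- stride code 2 ("128B") in the high nibble
	 * for EQEs and in the low nibble for CQEs.
	 */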
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_STATIC)
			MLX4_PUT(inbox,
				 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
				 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			MLX4_PUT(inbox,
				 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
				       << 6)),
				 INIT_HCA_FS_A0_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,	INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;

		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
		       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	int err;
	u8 byte_field;
	static const u8 a0_dmfs_query_hw_steering[] =  {
		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
		[2] = MLX4_STEERING_DMFS_A0_STATIC,
		[3] = MLX4_STEERING_DMFS_A0_DISABLE
	};

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}
	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		MLX4_GET(byte_field, outbox,
			 INIT_HCA_FS_A0_OFFSET);
		param->dmfs_high_steer_mode =
			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz,  outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
	if (byte_field) {
		param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
		param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
		param->cqe_size = 1 << ((byte_field &
					 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
		param->eqe_size = 1 << (((byte_field &
					  MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
	}
	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
		return PTR_ERR(mailbox);
	}
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err) {
		mlx4_warn(dev, "hca_core_clock update failed\n");
		goto out;
	}

	MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	if (!err)
		mlx4_hca_core_clock_update(dev);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
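
/*
 * Sketch of typical use (illustrative only; the real callers live in the
 * mlx4_ib and mlx4_en drivers): bring up every port, unwinding on failure.
 */
static int __maybe_unused example_init_all_ports(struct mlx4_dev *dev)
{
	int port, err;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_INIT_PORT(dev, port);
		if (err) {
			while (--port >= 1)
				mlx4_CLOSE_PORT(dev, port);
			return err;
		}
	}
	return 0;
}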
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	    (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
			MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
struct mlx4_config_dev {
	__be32	update_flags;
	__be32	rsvd1[3];
	__be16	vxlan_udp_dport;
	__be16	rsvd2;
	__be32	rsvd3;
	__be32	roce_flags;
	__be32	rsvd4[25];
	__be16	rsvd5;
	u8	rsvd6;
	u8	rx_checksum_val;
};

#define MLX4_VXLAN_UDP_DPORT (1 << 0)
#define MLX4_DISABLE_RX_PORT BIT(18)
static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (!err)
		memcpy(config_dev, mailbox->buf, sizeof(*config_dev));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* Conversion between the HW values and the actual functionality.
 * The value represented by the array index,
 * and the functionality determined by the flags.
 */
static const u8 config_dev_csum_flags[] = {
	[0] =	0,
	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_L4,
	[3] =	MLX4_RX_CSUM_MODE_L4			|
		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_MULTI_VLAN
};
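
/*
 * For example, a hardware value of 2 in a port's checksum field decodes
 * via the table above to
 * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | MLX4_RX_CSUM_MODE_L4;
 * mlx4_config_dev_retrieval() below extracts that value from
 * rx_checksum_val with a 3-bit mask per port.
 */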
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
			      struct mlx4_config_dev_params *params)
{
	struct mlx4_config_dev config_dev = {0};
	int err;
	u8 csum_mask;

#define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
		return -ENOTSUPP;

	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
	if (err)
		return err;

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
		return -EINVAL;
	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
		return -EINVAL;
	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];

	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
	config_dev.vxlan_udp_dport = udp_port;

	return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
#define CONFIG_DISABLE_RX_PORT BIT(15)
int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
	if (dis)
		config_dev.roce_flags =
			cpu_to_be32(CONFIG_DISABLE_RX_PORT);

	return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct {
		__be32 v_port1;
		__be32 v_port2;
	} *v2p;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	v2p = mailbox->buf;
	v2p->v_port1 = cpu_to_be32(port1);
	v2p->v_port2 = cpu_to_be32(port2);

	err = mlx4_cmd(dev, mailbox->dma, 0,
		       MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
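
/*
 * Worked example for the rounding above, assuming 64 KB system pages
 * (PAGE_SHIFT = 16) and 4 KB ICM pages (MLX4_ICM_PAGE_SHIFT = 12): if the
 * firmware asks for 5 ICM pages of auxiliary space, ALIGN(5, 16) = 16 and
 * 16 >> 4 = 1, i.e. one system page.  With 4 KB system pages the ALIGN
 * and shift are no-ops.
 */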
int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0X14
#define MOD_STAT_CFG_GUID_L	 0X1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Fail to get port %d uplink guid\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
enum {
	ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	u8 rem_mcg;
	u8 prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			return;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type &= 0xfff;

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err |
					(__force u32)cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}
static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
					  struct mlx4_cmd_mailbox *mailbox)
{
#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET		0x10
#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET		0x20
#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET		0x40
#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET	0x70

	u32 set_attr_mask, getresp_attr_mask;
	u32 trap_attr_mask, traprepress_attr_mask;

	MLX4_GET(set_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
		 set_attr_mask);

	MLX4_GET(getresp_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
		 getresp_attr_mask);

	MLX4_GET(trap_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
		 trap_attr_mask);

	MLX4_GET(traprepress_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
		 traprepress_attr_mask);

	if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
	    traprepress_attr_mask)
		return 1;

	return 0;
}
int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;
	int secure_host_active;
	int err;

	/* Check if mad_demux is supported */
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
		return -ENOMEM;
	}

	/* Query mad_demux to find out which MADs are handled by internal sma */
	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
			   MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
			   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err) {
		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
			  err);
		goto out;
	}

	secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);

	/* Config mad_demux to handle all MADs returned by the query above */
	err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
		       MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err) {
		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
		goto out;
	}

	if (secure_host_active)
		mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* Access Reg commands */
enum mlx4_access_reg_masks {
	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
	MLX4_ACCESS_REG_LEN_MASK = 0x7ff
};

struct mlx4_access_reg {
	__be16 constant1;
	u8 status;
	u8 resrvd1;
	__be16 reg_id;
	u8 method;
	u8 constant2;
	__be32 resrvd2[2];
	__be16 len_const;
	__be16 resrvd3;
#define MLX4_ACCESS_REG_HEADER_SIZE (20)
	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
} __attribute__((__packed__));
/**
 * mlx4_ACCESS_REG - Generic access reg command.
 * @dev: mlx4_dev.
 * @reg_id: register ID to access.
 * @method: Access method Read/Write.
 * @reg_len: register length to Read/Write in bytes.
 * @reg_data: reg_data pointer to Read/Write From/To.
 *
 * Access ConnectX registers FW command.
 * Returns 0 on success and copies outbox mlx4_access_reg data
 * field into reg_data or a negative error code.
 */
static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
			   enum mlx4_access_reg_method method,
			   u16 reg_len, void *reg_data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_access_reg *inbuf, *outbuf;
	int err;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inbuf = inbox->buf;
	outbuf = outbox->buf;

	inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
	inbuf->constant2 = 0x1;
	inbuf->reg_id = cpu_to_be16(reg_id);
	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;

	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
	inbuf->len_const =
		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
			    ((0x3) << 12));

	memcpy(inbuf->reg_data, reg_data, reg_len);
	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
		mlx4_err(dev,
			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
			 reg_id, err);
		goto out;
	}

	memcpy(reg_data, outbuf->reg_data, reg_len);
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return err;
}
/* ConnectX registers IDs */
enum {
	MLX4_REG_ID_PTYS = 0x5004,
};

/**
 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
 * register
 * @dev: mlx4_dev.
 * @method: Access method Read/Write.
 * @ptys_reg: PTYS register data pointer.
 *
 * Access ConnectX PTYS register, to Read/Write Port Type/Speed
 * configuration
 * Returns 0 on success or a negative error code.
 */
int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
			 enum mlx4_access_reg_method method,
			 struct mlx4_ptys_reg *ptys_reg)
{
	return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
			       method, sizeof(*ptys_reg), ptys_reg);
}
EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
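
/*
 * Sketch of typical use (illustrative only): query the PTYS register for
 * one Ethernet port, much as the mlx4_en ethtool code does before reading
 * the eth_proto_cap link-mode bits out of the returned structure.
 */
static int __maybe_unused example_query_ptys(struct mlx4_dev *dev, u8 port,
					     struct mlx4_ptys_reg *ptys_reg)
{
	memset(ptys_reg, 0, sizeof(*ptys_reg));
	ptys_reg->local_port = port;
	ptys_reg->proto_mask = MLX4_PTYS_EN;

	return mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, ptys_reg);
}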
int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_access_reg *inbuf = inbox->buf;
	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
	u16 reg_id = be16_to_cpu(inbuf->reg_id);

	if (slave != mlx4_master_func_num(dev) &&
	    method == MLX4_ACCESS_REG_WRITE)
		return -EPERM;

	if (reg_id == MLX4_REG_ID_PTYS) {
		struct mlx4_ptys_reg *ptys_reg =
			(struct mlx4_ptys_reg *)inbuf->reg_data;

		ptys_reg->local_port =
			mlx4_slave_convert_port(dev, slave,
						ptys_reg->local_port);
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
			    MLX4_CMD_NATIVE);
}