/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/export.h>
34 #include <linux/etherdevice.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/vport.h>
37 #include "mlx5_core.h"
39 static int _mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
40 u16 vport
, u32
*out
, int outlen
)
43 u32 in
[MLX5_ST_SZ_DW(query_vport_state_in
)];
45 memset(in
, 0, sizeof(in
));
47 MLX5_SET(query_vport_state_in
, in
, opcode
,
48 MLX5_CMD_OP_QUERY_VPORT_STATE
);
49 MLX5_SET(query_vport_state_in
, in
, op_mod
, opmod
);
50 MLX5_SET(query_vport_state_in
, in
, vport_number
, vport
);
52 MLX5_SET(query_vport_state_in
, in
, other_vport
, 1);
54 err
= mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
, outlen
);
56 mlx5_core_warn(mdev
, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
61 u8
mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
63 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
65 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
67 return MLX5_GET(query_vport_state_out
, out
, state
);
69 EXPORT_SYMBOL_GPL(mlx5_query_vport_state
);
71 u8
mlx5_query_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
73 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
75 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
77 return MLX5_GET(query_vport_state_out
, out
, admin_state
);
79 EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state
);
81 int mlx5_modify_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
84 u32 in
[MLX5_ST_SZ_DW(modify_vport_state_in
)];
85 u32 out
[MLX5_ST_SZ_DW(modify_vport_state_out
)];
88 memset(in
, 0, sizeof(in
));
90 MLX5_SET(modify_vport_state_in
, in
, opcode
,
91 MLX5_CMD_OP_MODIFY_VPORT_STATE
);
92 MLX5_SET(modify_vport_state_in
, in
, op_mod
, opmod
);
93 MLX5_SET(modify_vport_state_in
, in
, vport_number
, vport
);
96 MLX5_SET(modify_vport_state_in
, in
, other_vport
, 1);
98 MLX5_SET(modify_vport_state_in
, in
, admin_state
, state
);
100 err
= mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
,
103 mlx5_core_warn(mdev
, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
107 EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state
);
109 static int mlx5_query_nic_vport_context(struct mlx5_core_dev
*mdev
, u16 vport
,
110 u32
*out
, int outlen
)
112 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
114 memset(in
, 0, sizeof(in
));
116 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
117 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
119 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
121 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
123 return mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
, outlen
);
126 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev
*mdev
, void *in
,
129 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
131 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
132 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
134 memset(out
, 0, sizeof(out
));
135 return mlx5_cmd_exec_check_status(mdev
, in
, inlen
, out
, sizeof(out
));
138 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
142 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
146 out
= mlx5_vzalloc(outlen
);
150 out_addr
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
151 nic_vport_context
.permanent_address
);
153 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
155 ether_addr_copy(addr
, &out_addr
[2]);
160 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address
);
162 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
166 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
171 in
= mlx5_vzalloc(inlen
);
173 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
177 MLX5_SET(modify_nic_vport_context_in
, in
,
178 field_select
.permanent_address
, 1);
179 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
182 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, 1);
184 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
185 in
, nic_vport_context
);
186 perm_mac
= MLX5_ADDR_OF(nic_vport_context
, nic_vport_ctx
,
189 ether_addr_copy(&perm_mac
[2], addr
);
191 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
197 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address
);
199 int mlx5_query_nic_vport_mtu(struct mlx5_core_dev
*mdev
, u16
*mtu
)
201 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
205 out
= mlx5_vzalloc(outlen
);
209 err
= mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
211 *mtu
= MLX5_GET(query_nic_vport_context_out
, out
,
212 nic_vport_context
.mtu
);
217 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu
);
219 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev
*mdev
, u16 mtu
)
221 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
225 in
= mlx5_vzalloc(inlen
);
229 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.mtu
, 1);
230 MLX5_SET(modify_nic_vport_context_in
, in
, nic_vport_context
.mtu
, mtu
);
232 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
237 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu
);
239 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
241 enum mlx5_list_type list_type
,
242 u8 addr_list
[][ETH_ALEN
],
245 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
254 req_list_size
= *list_size
;
256 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
257 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
258 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
260 if (req_list_size
> max_list_size
) {
261 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max_list_size\n",
262 req_list_size
, max_list_size
);
263 req_list_size
= max_list_size
;
266 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
267 req_list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
269 memset(in
, 0, sizeof(in
));
270 out
= kzalloc(out_sz
, GFP_KERNEL
);
274 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
275 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
276 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
, list_type
);
277 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
280 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
282 err
= mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, out_sz
);
286 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
288 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
291 *list_size
= req_list_size
;
292 for (i
= 0; i
< req_list_size
; i
++) {
293 u8
*mac_addr
= MLX5_ADDR_OF(nic_vport_context
,
295 current_uc_mac_address
[i
]) + 2;
296 ether_addr_copy(addr_list
[i
], mac_addr
);
302 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list
);
304 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
305 enum mlx5_list_type list_type
,
306 u8 addr_list
[][ETH_ALEN
],
309 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
317 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
318 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
319 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
321 if (list_size
> max_list_size
)
324 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
325 list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
327 memset(out
, 0, sizeof(out
));
328 in
= kzalloc(in_sz
, GFP_KERNEL
);
332 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
333 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
334 MLX5_SET(modify_nic_vport_context_in
, in
,
335 field_select
.addresses_list
, 1);
337 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
340 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
341 allowed_list_type
, list_type
);
342 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
343 allowed_list_size
, list_size
);
345 for (i
= 0; i
< list_size
; i
++) {
346 u8
*curr_mac
= MLX5_ADDR_OF(nic_vport_context
,
348 current_uc_mac_address
[i
]) + 2;
349 ether_addr_copy(curr_mac
, addr_list
[i
]);
352 err
= mlx5_cmd_exec_check_status(dev
, in
, in_sz
, out
, sizeof(out
));
356 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list
);
358 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev
*dev
,
363 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
372 req_list_size
= *size
;
373 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
374 if (req_list_size
> max_list_size
) {
375 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max list size\n",
376 req_list_size
, max_list_size
);
377 req_list_size
= max_list_size
;
380 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
381 req_list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
383 memset(in
, 0, sizeof(in
));
384 out
= kzalloc(out_sz
, GFP_KERNEL
);
388 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
389 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
390 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
,
391 MLX5_NVPRT_LIST_TYPE_VLAN
);
392 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
395 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
397 err
= mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, out_sz
);
401 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
403 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
406 *size
= req_list_size
;
407 for (i
= 0; i
< req_list_size
; i
++) {
408 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
410 current_uc_mac_address
[i
]);
411 vlans
[i
] = MLX5_GET(vlan_layout
, vlan_addr
, vlan
);
417 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans
);
419 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev
*dev
,
423 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
431 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
433 if (list_size
> max_list_size
)
436 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
437 list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
439 memset(out
, 0, sizeof(out
));
440 in
= kzalloc(in_sz
, GFP_KERNEL
);
444 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
445 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
446 MLX5_SET(modify_nic_vport_context_in
, in
,
447 field_select
.addresses_list
, 1);
449 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
452 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
453 allowed_list_type
, MLX5_NVPRT_LIST_TYPE_VLAN
);
454 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
455 allowed_list_size
, list_size
);
457 for (i
= 0; i
< list_size
; i
++) {
458 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
460 current_uc_mac_address
[i
]);
461 MLX5_SET(vlan_layout
, vlan_addr
, vlan
, vlans
[i
]);
464 err
= mlx5_cmd_exec_check_status(dev
, in
, in_sz
, out
, sizeof(out
));
468 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans
);
470 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev
*mdev
,
471 u64
*system_image_guid
)
474 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
476 out
= mlx5_vzalloc(outlen
);
480 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
482 *system_image_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
483 nic_vport_context
.system_image_guid
);
489 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid
);
491 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev
*mdev
, u64
*node_guid
)
494 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
496 out
= mlx5_vzalloc(outlen
);
500 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
502 *node_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
503 nic_vport_context
.node_guid
);
509 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid
);
511 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev
*mdev
,
512 u32 vport
, u64 node_guid
)
514 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
515 void *nic_vport_context
;
521 if (!MLX5_CAP_GEN(mdev
, vport_group_manager
))
523 if (!MLX5_CAP_ESW(mdev
, nic_vport_node_guid_modify
))
526 in
= mlx5_vzalloc(inlen
);
530 MLX5_SET(modify_nic_vport_context_in
, in
,
531 field_select
.node_guid
, 1);
532 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
533 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, !!vport
);
535 nic_vport_context
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
536 in
, nic_vport_context
);
537 MLX5_SET64(nic_vport_context
, nic_vport_context
, node_guid
, node_guid
);
539 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
546 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev
*mdev
,
550 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
552 out
= mlx5_vzalloc(outlen
);
556 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
558 *qkey_viol_cntr
= MLX5_GET(query_nic_vport_context_out
, out
,
559 nic_vport_context
.qkey_violation_counter
);
565 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr
);
567 int mlx5_query_hca_vport_gid(struct mlx5_core_dev
*dev
, u8 other_vport
,
568 u8 port_num
, u16 vf_num
, u16 gid_index
,
571 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_in
);
572 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
573 int is_group_manager
;
581 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
582 tbsz
= mlx5_get_gid_table_len(MLX5_CAP_GEN(dev
, gid_table_size
));
583 mlx5_core_dbg(dev
, "vf_num %d, index %d, gid_table_size %d\n",
584 vf_num
, gid_index
, tbsz
);
586 if (gid_index
> tbsz
&& gid_index
!= 0xffff)
589 if (gid_index
== 0xffff)
594 out_sz
+= nout
* sizeof(*gid
);
596 in
= kzalloc(in_sz
, GFP_KERNEL
);
597 out
= kzalloc(out_sz
, GFP_KERNEL
);
603 MLX5_SET(query_hca_vport_gid_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_GID
);
605 if (is_group_manager
) {
606 MLX5_SET(query_hca_vport_gid_in
, in
, vport_number
, vf_num
);
607 MLX5_SET(query_hca_vport_gid_in
, in
, other_vport
, 1);
613 MLX5_SET(query_hca_vport_gid_in
, in
, gid_index
, gid_index
);
615 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
616 MLX5_SET(query_hca_vport_gid_in
, in
, port_num
, port_num
);
618 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
622 err
= mlx5_cmd_status_to_err_v2(out
);
626 tmp
= out
+ MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
627 gid
->global
.subnet_prefix
= tmp
->global
.subnet_prefix
;
628 gid
->global
.interface_id
= tmp
->global
.interface_id
;
635 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid
);
637 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev
*dev
, u8 other_vport
,
638 u8 port_num
, u16 vf_num
, u16 pkey_index
,
641 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in
);
642 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out
);
643 int is_group_manager
;
652 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
654 tbsz
= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev
, pkey_table_size
));
655 if (pkey_index
> tbsz
&& pkey_index
!= 0xffff)
658 if (pkey_index
== 0xffff)
663 out_sz
+= nout
* MLX5_ST_SZ_BYTES(pkey
);
665 in
= kzalloc(in_sz
, GFP_KERNEL
);
666 out
= kzalloc(out_sz
, GFP_KERNEL
);
672 MLX5_SET(query_hca_vport_pkey_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY
);
674 if (is_group_manager
) {
675 MLX5_SET(query_hca_vport_pkey_in
, in
, vport_number
, vf_num
);
676 MLX5_SET(query_hca_vport_pkey_in
, in
, other_vport
, 1);
682 MLX5_SET(query_hca_vport_pkey_in
, in
, pkey_index
, pkey_index
);
684 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
685 MLX5_SET(query_hca_vport_pkey_in
, in
, port_num
, port_num
);
687 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
691 err
= mlx5_cmd_status_to_err_v2(out
);
695 pkarr
= MLX5_ADDR_OF(query_hca_vport_pkey_out
, out
, pkey
);
696 for (i
= 0; i
< nout
; i
++, pkey
++, pkarr
+= MLX5_ST_SZ_BYTES(pkey
))
697 *pkey
= MLX5_GET_PR(pkey
, pkarr
, pkey
);
704 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey
);
706 int mlx5_query_hca_vport_context(struct mlx5_core_dev
*dev
,
707 u8 other_vport
, u8 port_num
,
709 struct mlx5_hca_vport_context
*rep
)
711 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_context_out
);
712 int in
[MLX5_ST_SZ_DW(query_hca_vport_context_in
)];
713 int is_group_manager
;
718 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
720 memset(in
, 0, sizeof(in
));
721 out
= kzalloc(out_sz
, GFP_KERNEL
);
725 MLX5_SET(query_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT
);
728 if (is_group_manager
) {
729 MLX5_SET(query_hca_vport_context_in
, in
, other_vport
, 1);
730 MLX5_SET(query_hca_vport_context_in
, in
, vport_number
, vf_num
);
737 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
738 MLX5_SET(query_hca_vport_context_in
, in
, port_num
, port_num
);
740 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_sz
);
743 err
= mlx5_cmd_status_to_err_v2(out
);
747 ctx
= MLX5_ADDR_OF(query_hca_vport_context_out
, out
, hca_vport_context
);
748 rep
->field_select
= MLX5_GET_PR(hca_vport_context
, ctx
, field_select
);
749 rep
->sm_virt_aware
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_virt_aware
);
750 rep
->has_smi
= MLX5_GET_PR(hca_vport_context
, ctx
, has_smi
);
751 rep
->has_raw
= MLX5_GET_PR(hca_vport_context
, ctx
, has_raw
);
752 rep
->policy
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state_policy
);
753 rep
->phys_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
754 port_physical_state
);
755 rep
->vport_state
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state
);
756 rep
->port_physical_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
757 port_physical_state
);
758 rep
->port_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, port_guid
);
759 rep
->node_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, node_guid
);
760 rep
->cap_mask1
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask1
);
761 rep
->cap_mask1_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
762 cap_mask1_field_select
);
763 rep
->cap_mask2
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask2
);
764 rep
->cap_mask2_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
765 cap_mask2_field_select
);
766 rep
->lid
= MLX5_GET_PR(hca_vport_context
, ctx
, lid
);
767 rep
->init_type_reply
= MLX5_GET_PR(hca_vport_context
, ctx
,
769 rep
->lmc
= MLX5_GET_PR(hca_vport_context
, ctx
, lmc
);
770 rep
->subnet_timeout
= MLX5_GET_PR(hca_vport_context
, ctx
,
772 rep
->sm_lid
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_lid
);
773 rep
->sm_sl
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_sl
);
774 rep
->qkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
775 qkey_violation_counter
);
776 rep
->pkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
777 pkey_violation_counter
);
778 rep
->grh_required
= MLX5_GET_PR(hca_vport_context
, ctx
, grh_required
);
779 rep
->sys_image_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
,
786 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context
);
788 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev
*dev
,
791 struct mlx5_hca_vport_context
*rep
;
794 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
798 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
800 *sys_image_guid
= rep
->sys_image_guid
;
805 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid
);
807 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev
*dev
,
810 struct mlx5_hca_vport_context
*rep
;
813 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
817 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
819 *node_guid
= rep
->node_guid
;
824 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid
);
826 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
833 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
836 out
= kzalloc(outlen
, GFP_KERNEL
);
840 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
844 *promisc_uc
= MLX5_GET(query_nic_vport_context_out
, out
,
845 nic_vport_context
.promisc_uc
);
846 *promisc_mc
= MLX5_GET(query_nic_vport_context_out
, out
,
847 nic_vport_context
.promisc_mc
);
848 *promisc_all
= MLX5_GET(query_nic_vport_context_out
, out
,
849 nic_vport_context
.promisc_all
);
855 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc
);
857 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
863 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
866 in
= mlx5_vzalloc(inlen
);
868 mlx5_core_err(mdev
, "failed to allocate inbox\n");
872 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.promisc
, 1);
873 MLX5_SET(modify_nic_vport_context_in
, in
,
874 nic_vport_context
.promisc_uc
, promisc_uc
);
875 MLX5_SET(modify_nic_vport_context_in
, in
,
876 nic_vport_context
.promisc_mc
, promisc_mc
);
877 MLX5_SET(modify_nic_vport_context_in
, in
,
878 nic_vport_context
.promisc_all
, promisc_all
);
880 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
886 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc
);
/* RoCE enablement values written to nic_vport_context.roce_en */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
893 static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev
*mdev
,
894 enum mlx5_vport_roce_state state
)
897 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
900 in
= mlx5_vzalloc(inlen
);
902 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
906 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.roce_en
, 1);
907 MLX5_SET(modify_nic_vport_context_in
, in
, nic_vport_context
.roce_en
,
910 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
917 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev
*mdev
)
919 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_ENABLED
);
921 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce
);
923 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev
*mdev
)
925 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_DISABLED
);
927 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce
);
929 int mlx5_core_query_vport_counter(struct mlx5_core_dev
*dev
, u8 other_vport
,
930 int vf
, u8 port_num
, void *out
,
933 int in_sz
= MLX5_ST_SZ_BYTES(query_vport_counter_in
);
934 int is_group_manager
;
938 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
939 in
= mlx5_vzalloc(in_sz
);
945 MLX5_SET(query_vport_counter_in
, in
, opcode
,
946 MLX5_CMD_OP_QUERY_VPORT_COUNTER
);
948 if (is_group_manager
) {
949 MLX5_SET(query_vport_counter_in
, in
, other_vport
, 1);
950 MLX5_SET(query_vport_counter_in
, in
, vport_number
, vf
+ 1);
956 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
957 MLX5_SET(query_vport_counter_in
, in
, port_num
, port_num
);
959 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
962 err
= mlx5_cmd_status_to_err_v2(out
);
968 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter
);
970 int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev
*dev
,
971 u8 other_vport
, u8 port_num
,
973 struct mlx5_hca_vport_context
*req
)
975 int in_sz
= MLX5_ST_SZ_BYTES(modify_hca_vport_context_in
);
976 u8 out
[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out
)];
977 int is_group_manager
;
982 mlx5_core_dbg(dev
, "vf %d\n", vf
);
983 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
984 in
= kzalloc(in_sz
, GFP_KERNEL
);
988 memset(out
, 0, sizeof(out
));
989 MLX5_SET(modify_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT
);
991 if (is_group_manager
) {
992 MLX5_SET(modify_hca_vport_context_in
, in
, other_vport
, 1);
993 MLX5_SET(modify_hca_vport_context_in
, in
, vport_number
, vf
);
1000 if (MLX5_CAP_GEN(dev
, num_ports
) > 1)
1001 MLX5_SET(modify_hca_vport_context_in
, in
, port_num
, port_num
);
1003 ctx
= MLX5_ADDR_OF(modify_hca_vport_context_in
, in
, hca_vport_context
);
1004 MLX5_SET(hca_vport_context
, ctx
, field_select
, req
->field_select
);
1005 MLX5_SET(hca_vport_context
, ctx
, sm_virt_aware
, req
->sm_virt_aware
);
1006 MLX5_SET(hca_vport_context
, ctx
, has_smi
, req
->has_smi
);
1007 MLX5_SET(hca_vport_context
, ctx
, has_raw
, req
->has_raw
);
1008 MLX5_SET(hca_vport_context
, ctx
, vport_state_policy
, req
->policy
);
1009 MLX5_SET(hca_vport_context
, ctx
, port_physical_state
, req
->phys_state
);
1010 MLX5_SET(hca_vport_context
, ctx
, vport_state
, req
->vport_state
);
1011 MLX5_SET64(hca_vport_context
, ctx
, port_guid
, req
->port_guid
);
1012 MLX5_SET64(hca_vport_context
, ctx
, node_guid
, req
->node_guid
);
1013 MLX5_SET(hca_vport_context
, ctx
, cap_mask1
, req
->cap_mask1
);
1014 MLX5_SET(hca_vport_context
, ctx
, cap_mask1_field_select
, req
->cap_mask1_perm
);
1015 MLX5_SET(hca_vport_context
, ctx
, cap_mask2
, req
->cap_mask2
);
1016 MLX5_SET(hca_vport_context
, ctx
, cap_mask2_field_select
, req
->cap_mask2_perm
);
1017 MLX5_SET(hca_vport_context
, ctx
, lid
, req
->lid
);
1018 MLX5_SET(hca_vport_context
, ctx
, init_type_reply
, req
->init_type_reply
);
1019 MLX5_SET(hca_vport_context
, ctx
, lmc
, req
->lmc
);
1020 MLX5_SET(hca_vport_context
, ctx
, subnet_timeout
, req
->subnet_timeout
);
1021 MLX5_SET(hca_vport_context
, ctx
, sm_lid
, req
->sm_lid
);
1022 MLX5_SET(hca_vport_context
, ctx
, sm_sl
, req
->sm_sl
);
1023 MLX5_SET(hca_vport_context
, ctx
, qkey_violation_counter
, req
->qkey_violation_counter
);
1024 MLX5_SET(hca_vport_context
, ctx
, pkey_violation_counter
, req
->pkey_violation_counter
);
1025 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, sizeof(out
));
1029 err
= mlx5_cmd_status_to_err_v2(out
);
1035 EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context
);