drivers/net/ethernet/mellanox/mlx5/core/vport.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

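/*
 * Issue QUERY_VPORT_STATE for @vport. @opmod selects the vport type as
 * defined for the command's op_mod field. vport number 0 addresses the
 * caller's own vport; any other value requires the other_vport flag.
 */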
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
                                   u16 vport, u32 *out, int outlen)
{
        int err;
        u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];

        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_state_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_STATE);
        MLX5_SET(query_vport_state_in, in, op_mod, opmod);
        MLX5_SET(query_vport_state_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(query_vport_state_in, in, other_vport, 1);

        err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
        if (err)
                mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

        return err;
}

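/*
 * Thin wrappers around _mlx5_query_vport_state() returning the
 * operational "state" and the administrative "admin_state" fields,
 * respectively. Command errors are only logged by the helper, so on
 * failure these return the zero-initialized output buffer's value (0).
 * mlx5_modify_vport_admin_state() below is the corresponding setter.
 */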
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

        _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

        return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

        _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

        return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
                                  u16 vport, u8 state)
{
        u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
        u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
        int err;

        memset(in, 0, sizeof(in));

        MLX5_SET(modify_vport_state_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_VPORT_STATE);
        MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
        MLX5_SET(modify_vport_state_in, in, vport_number, vport);

        if (vport)
                MLX5_SET(modify_vport_state_in, in, other_vport, 1);

        MLX5_SET(modify_vport_state_in, in, admin_state, state);

        err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
                                         sizeof(out));
        if (err)
                mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

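/*
 * Common helpers for the NIC vport accessors below: wrap the
 * QUERY/MODIFY_NIC_VPORT_CONTEXT commands. Callers of the modify helper
 * fill in the field_select bits and context fields; the helper only
 * stamps the opcode and executes the command.
 */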
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
                                        u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];

        memset(in, 0, sizeof(in));

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

        MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
        if (vport)
                MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

        return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
                                         int inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];

        MLX5_SET(modify_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

        memset(out, 0, sizeof(out));
        return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}

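/*
 * Get/set the permanent MAC address of a NIC vport. In the firmware
 * layout, permanent_address is an 8-byte field that carries the 6-byte
 * MAC in its low bytes, hence the +2 offset on both the read and the
 * write side.
 */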
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 *addr)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        u8 *out_addr;
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                nic_vport_context.permanent_address);

        err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
        if (!err)
                ether_addr_copy(addr, &out_addr[2]);

        kvfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
                                      u16 vport, u8 *addr)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;
        void *nic_vport_ctx;
        u8 *perm_mac;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_warn(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.permanent_address, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

        if (vport)
                MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);
        perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
                                permanent_address);

        ether_addr_copy(&perm_mac[2], addr);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

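/*
 * Read the allowed UC or MC MAC list of a vport. On entry, *list_size is
 * the capacity of addr_list (silently clamped to the device maximum); on
 * success it is updated to the number of entries firmware returned.
 */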
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
                                  u32 vport,
                                  enum mlx5_list_type list_type,
                                  u8 addr_list[][ETH_ALEN],
                                  int *list_size)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
        void *nic_vport_ctx;
        int max_list_size;
        int req_list_size;
        int out_sz;
        void *out;
        int err;
        int i;

        req_list_size = *list_size;

        max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
                        1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
                        1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

        if (req_list_size > max_list_size) {
                mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
                               req_list_size, max_list_size);
                req_list_size = max_list_size;
        }

        out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
        MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
        MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

        if (vport)
                MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

        err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto out;

        nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                     nic_vport_context);
        req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
                                 allowed_list_size);

        *list_size = req_list_size;
        for (i = 0; i < req_list_size; i++) {
                u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
                                            nic_vport_ctx,
                                            current_uc_mac_address[i]) + 2;
                ether_addr_copy(addr_list[i], mac_addr);
        }
out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

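/*
 * Replace the allowed UC or MC MAC list of the caller's own vport.
 * Unlike the query above, an oversized list is rejected with -ENOSPC
 * rather than truncated.
 */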
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
                                   enum mlx5_list_type list_type,
                                   u8 addr_list[][ETH_ALEN],
                                   int list_size)
{
        u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
        void *nic_vport_ctx;
        int max_list_size;
        int in_sz;
        void *in;
        int err;
        int i;

        max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
                        1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
                        1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

        if (list_size > max_list_size)
                return -ENOSPC;

        in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

        memset(out, 0, sizeof(out));
        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.addresses_list, 1);

        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
                                     nic_vport_context);

        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_type, list_type);
        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_size, list_size);

        for (i = 0; i < list_size; i++) {
                u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
                                            nic_vport_ctx,
                                            current_uc_mac_address[i]) + 2;
                ether_addr_copy(curr_mac, addr_list[i]);
        }

        err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
        kfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

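/*
 * Query the allowed VLAN list. VLAN entries occupy the same allowed-list
 * slots as MAC entries, which is why they are addressed through
 * current_uc_mac_address[] and then decoded with the vlan_layout
 * accessors.
 */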
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
                               u32 vport,
                               u16 vlans[],
                               int *size)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
        void *nic_vport_ctx;
        int req_list_size;
        int max_list_size;
        int out_sz;
        void *out;
        int err;
        int i;

        req_list_size = *size;
        max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
        if (req_list_size > max_list_size) {
                mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
                               req_list_size, max_list_size);
                req_list_size = max_list_size;
        }

        out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
        MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
                 MLX5_NVPRT_LIST_TYPE_VLAN);
        MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

        if (vport)
                MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

        err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto out;

        nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                     nic_vport_context);
        req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
                                 allowed_list_size);

        *size = req_list_size;
        for (i = 0; i < req_list_size; i++) {
                void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
                                               nic_vport_ctx,
                                               current_uc_mac_address[i]);
                vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
        }
out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

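/* Set the allowed VLAN list; same -ENOSPC policy as the MAC list above. */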
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
                                u16 vlans[],
                                int list_size)
{
        u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
        void *nic_vport_ctx;
        int max_list_size;
        int in_sz;
        void *in;
        int err;
        int i;

        max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

        if (list_size > max_list_size)
                return -ENOSPC;

        in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
                list_size * MLX5_ST_SZ_BYTES(vlan_layout);

        memset(out, 0, sizeof(out));
        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(modify_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.addresses_list, 1);

        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
                                     nic_vport_context);

        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
        MLX5_SET(nic_vport_context, nic_vport_ctx,
                 allowed_list_size, list_size);

        for (i = 0; i < list_size; i++) {
                void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
                                               nic_vport_ctx,
                                               current_uc_mac_address[i]);
                MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
        }

        err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
        kfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

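/*
 * Simple read-only accessors for individual NIC vport context fields of
 * the caller's own vport.
 */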
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
        if (!err)
                *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                                nic_vport_context.system_image_guid);

        /* mlx5_vzalloc() may fall back to vmalloc(), so free with kvfree() */
        kvfree(out);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
        if (!err)
                *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                        nic_vport_context.node_guid);

        kvfree(out);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
        if (!err)
                *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
                                           nic_vport_context.qkey_violation_counter);

        kvfree(out);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

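/*
 * Query GIDs of an HCA vport. gid_index 0xffff asks firmware for the
 * whole table, but only the first returned entry is copied into *gid.
 * Addressing another vport requires the vport_group_manager capability,
 * otherwise -EPERM is returned.
 */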
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
                             u8 port_num, u16 vf_num, u16 gid_index,
                             union ib_gid *gid)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        union ib_gid *tmp;
        int tbsz;
        int nout;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
        mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
                      vf_num, gid_index, tbsz);

        if (gid_index > tbsz && gid_index != 0xffff)
                return -EINVAL;

        if (gid_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * sizeof(*gid);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto out;

        tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        gid->global.subnet_prefix = tmp->global.subnet_prefix;
        gid->global.interface_id = tmp->global.interface_id;

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

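/*
 * Query P_Keys of an HCA vport. pkey_index 0xffff dumps the whole table,
 * in which case *pkey must have room for tbsz entries.
 */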
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
                              u8 port_num, u16 vf_num, u16 pkey_index,
                              u16 *pkey)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        void *pkarr;
        int nout;
        int tbsz;
        int err;
        int i;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
        if (pkey_index > tbsz && pkey_index != 0xffff)
                return -EINVAL;

        if (pkey_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto out;

        pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
        for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
                *pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

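/*
 * Read the complete HCA vport context into @rep, field by field. The
 * inverse operation is mlx5_core_modify_hca_vport_context() below.
 */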
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
                                 u8 other_vport, u8 port_num,
                                 u16 vf_num,
                                 struct mlx5_hca_vport_context *rep)
{
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
        u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
        int is_group_manager;
        void *out;
        void *ctx;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
                        MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
                } else {
                        err = -EPERM;
                        goto ex;
                }
        }

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto ex;
        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto ex;

        ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
        rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
        rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
        rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
        rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
        rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
        rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
                                      port_physical_state);
        rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
        rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
                                               port_physical_state);
        rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
        rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
        rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
        rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask1_field_select);
        rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
        rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask2_field_select);
        rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
        rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
                                           init_type_reply);
        rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
        rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
                                          subnet_timeout);
        rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
        rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
        rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  qkey_violation_counter);
        rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  pkey_violation_counter);
        rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
        rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
                                            system_image_guid);

ex:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
                                           u64 *sys_image_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *sys_image_guid = rep->sys_image_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
                                   u64 *node_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *node_guid = rep->node_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

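/*
 * Get/set the promiscuous flags (promisc_uc/mc/all) in the NIC vport
 * context. The modify variant always operates on the caller's own vport.
 */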
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
                                 u32 vport,
                                 int *promisc_uc,
                                 int *promisc_mc,
                                 int *promisc_all)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
        if (err)
                goto out;

        *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
                               nic_vport_context.promisc_uc);
        *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
                               nic_vport_context.promisc_mc);
        *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
                                nic_vport_context.promisc_all);

out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
                                  int promisc_uc,
                                  int promisc_mc,
                                  int promisc_all)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_err(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_uc, promisc_uc);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_mc, promisc_mc);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.promisc_all, promisc_all);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

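/*
 * RoCE enablement is just the roce_en bit of the NIC vport context; the
 * two exported helpers below share the common modify path.
 */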
enum mlx5_vport_roce_state {
        MLX5_VPORT_ROCE_DISABLED = 0,
        MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
                                            enum mlx5_vport_roce_state state)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_warn(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
        MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
                 state);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
        return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
        return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

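/*
 * Query the traffic counters of a vport. @vf is zero based while the
 * command's vport_number appears to start at 1 for VFs (0 being the
 * caller's own vport), hence the vf + 1 translation below.
 */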
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
                                  int vf, u8 port_num, void *out,
                                  size_t out_sz)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
        int is_group_manager;
        void *in;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        in = mlx5_vzalloc(in_sz);
        if (!in)
                return -ENOMEM;

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_vport_counter_in, in, other_vport, 1);
                        MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
                } else {
                        err = -EPERM;
                        goto free;
                }
        }
        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_vport_counter_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto free;
        err = mlx5_cmd_status_to_err_v2(out);

free:
        kvfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

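/*
 * Write back a caller-filled HCA vport context; the counterpart of
 * mlx5_query_hca_vport_context() above. Only the fields enabled in
 * req->field_select are meant to be applied by firmware.
 */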
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
                                       u8 other_vport, u8 port_num,
                                       int vf,
                                       struct mlx5_hca_vport_context *req)
{
        int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
        u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
        int is_group_manager;
        void *in;
        int err;
        void *ctx;

        mlx5_core_dbg(dev, "vf %d\n", vf);
        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        memset(out, 0, sizeof(out));
        MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
                        MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
                } else {
                        err = -EPERM;
                        goto ex;
                }
        }

        if (MLX5_CAP_GEN(dev, num_ports) > 1)
                MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

        ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
        MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
        MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
        MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
        MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
        MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
        MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
        MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
        MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
        MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
        MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
        MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
        MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
        MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
        MLX5_SET(hca_vport_context, ctx, lid, req->lid);
        MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
        MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
        MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
        MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
        MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
        MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
        MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        if (err)
                goto ex;

        err = mlx5_cmd_status_to_err_v2(out);

ex:
        kfree(in);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);