drivers/net/ethernet/mellanox/mlx5/core/vport.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

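/*
 * All commands below follow the same pattern: a zeroed inbox is built with
 * the MLX5_SET()/MLX5_SET64() accessors (which handle the big-endian,
 * bit-packed firmware layout), executed with mlx5_cmd_exec(), and results
 * are read back with MLX5_GET()/MLX5_GET64(). When a command targets a
 * vport other than the caller's own, the other_vport bit must be set
 * alongside vport_number.
 */
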
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

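/**
 * mlx5_query_vport_state - read the operational state of a vport
 * @mdev: mlx5 core device
 * @opmod: operation modifier selecting the vport type
 * @vport: vport number (0 addresses the caller's own vport)
 *
 * Note that the command status is not checked here: if the query fails,
 * the state field of the zero-initialized outbox (i.e. 0) is returned.
 */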
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

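/**
 * mlx5_query_nic_vport_min_inline - read the minimal WQE inline mode
 * @mdev: mlx5 core device
 * @min_inline_mode: filled with min_wqe_inline_mode of the local vport
 */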
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u8 *min_inline_mode)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};

	mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));

	*min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
				    nic_vport_context.min_wqe_inline_mode);
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}

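/**
 * mlx5_query_nic_vport_mac_address - read a vport's permanent MAC address
 * @mdev: mlx5 core device
 * @vport: vport number (0 for the local vport)
 * @addr: filled with the 6-byte MAC address on success
 *
 * The permanent_address field is 8 bytes wide with the MAC address in its
 * lower 6 bytes, hence the 2-byte offset when copying out.
 */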
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

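/**
 * mlx5_modify_nic_vport_mac_address - set a vport's permanent MAC address
 * @mdev: mlx5 core device
 * @vport: vport number (0 for the local vport)
 * @addr: the 6-byte MAC address to program
 */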
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

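/**
 * mlx5_query_nic_vport_mtu - read the MTU of the local NIC vport
 * @mdev: mlx5 core device
 * @mtu: filled with the current MTU on success
 */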
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

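/**
 * mlx5_query_nic_vport_mac_list - read a vport's allowed UC/MC address list
 * @dev: mlx5 core device
 * @vport: vport number (0 for the local vport)
 * @list_type: MLX5_NVPRT_LIST_TYPE_UC or MLX5_NVPRT_LIST_TYPE_MC
 * @addr_list: filled with the returned MAC addresses
 * @list_size: in: capacity of @addr_list; out: number of entries returned
 *
 * Requests larger than the firmware-advertised maximum are clamped with a
 * warning rather than rejected.
 */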
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > max list size (%d)\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

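/**
 * mlx5_modify_nic_vport_mac_list - replace the local vport's allowed address list
 * @dev: mlx5 core device
 * @list_type: MLX5_NVPRT_LIST_TYPE_UC or MLX5_NVPRT_LIST_TYPE_MC
 * @addr_list: MAC addresses to program
 * @list_size: number of entries in @addr_list
 *
 * Illustrative caller (a sketch, not taken from in-tree code):
 *
 *	u8 macs[2][ETH_ALEN] = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *				 { 0x00, 0x11, 0x22, 0x33, 0x44, 0x56 } };
 *	err = mlx5_modify_nic_vport_mac_list(dev, MLX5_NVPRT_LIST_TYPE_UC,
 *					     macs, 2);
 *
 * Returns -ENOSPC when @list_size exceeds the firmware-advertised maximum.
 */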
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

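/**
 * mlx5_query_nic_vport_vlans - read a vport's allowed VLAN list
 * @dev: mlx5 core device
 * @vport: vport number (0 for the local vport)
 * @vlans: filled with the returned VLAN IDs
 * @size: in: capacity of @vlans; out: number of entries returned
 *
 * VLAN entries reuse the allowed-list slots of the NIC vport context
 * (current_uc_mac_address[]), reinterpreted through vlan_layout.
 */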
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > max list size (%d)\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

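/**
 * mlx5_modify_nic_vport_node_guid - set the node GUID of another vport
 * @mdev: mlx5 core device
 * @vport: vport number; must be non-zero (only other vports may be modified)
 * @node_guid: the GUID to program
 *
 * Requires the device to be the vport group manager and firmware support
 * for nic_vport_node_guid_modify.
 */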
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

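/**
 * mlx5_query_hca_vport_gid - read entries from a vport's GID table
 * @dev: mlx5 core device
 * @other_vport: non-zero to query a vport other than the caller's own
 * @port_num: physical port number (only set when the device has two ports)
 * @vf_num: virtual function number when @other_vport is set
 * @gid_index: table index to read; 0xffff requests the whole table
 * @gid: filled with the GID at the returned position
 *
 * Querying another vport requires the vport group manager capability.
 */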
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

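/**
 * mlx5_query_hca_vport_context - read the HCA vport context into @rep
 * @dev: mlx5 core device
 * @other_vport: non-zero to query a vport other than the caller's own
 * @port_num: physical port number (only set when the device has two ports)
 * @vf_num: virtual function number when @other_vport is set
 * @rep: software representation of the context, filled on success
 */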
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_err(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

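/*
 * RoCE on the local NIC vport is toggled through the roce_en bit of the
 * NIC vport context. A typical (illustrative) call sequence from an RDMA
 * driver would be mlx5_nic_vport_enable_roce() at load time and
 * mlx5_nic_vport_disable_roce() on teardown.
 */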
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

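/**
 * mlx5_core_query_vport_counter - run QUERY_VPORT_COUNTER for a vport
 * @dev: mlx5 core device
 * @other_vport: non-zero to query a vport other than the caller's own
 * @vf: zero-based virtual function number; the vport number used is @vf + 1
 * @port_num: physical port number (only set when the device has two ports)
 * @out: caller-supplied outbox receiving the raw counter set
 * @out_sz: size of @out in bytes
 */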
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)] = {0};
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);