/* drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c */
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"

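/*
 * Point the device at a new root flow table via the SET_FLOW_TABLE_ROOT
 * command.  When the table belongs to another vport, the vport number is
 * passed along with the other_vport flag.
 */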
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
	MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
	}

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}

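/*
 * Create a flow table of the given type, level and log2 size with the
 * CREATE_FLOW_TABLE command.  If @next_ft is set, packets that miss in
 * this table are forwarded to it (table_miss_mode 1).  The new table id
 * is returned in @table_id.
 */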
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
			       u16 vport,
			       enum fs_flow_table_type type, unsigned int level,
			       unsigned int log_size,
			       struct mlx5_flow_table *next_ft,
			       unsigned int *table_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	if (next_ft) {
		MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
		MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
	}
	MLX5_SET(create_flow_table_in, in, table_type, type);
	MLX5_SET(create_flow_table_in, in, level, level);
	MLX5_SET(create_flow_table_in, in, log_size, log_size);
	if (vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					 sizeof(out));

	if (!err)
		*table_id = MLX5_GET(create_flow_table_out, out,
				     table_id);
	return err;
}

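/* Destroy a flow table previously created by mlx5_cmd_create_flow_table(). */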
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
				struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}

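/*
 * Update the miss destination of @ft: when @next_ft is given, misses are
 * forwarded to it, otherwise the table reverts to the default miss action.
 * Only the miss table id field is selected for modification.
 */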
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)];
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport, 1);
	}
	MLX5_SET(modify_flow_table_in, in, modify_field_select,
		 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
	if (next_ft) {
		MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
		MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id);
	} else {
		MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
	}

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}

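/*
 * Create a flow group inside @ft.  The caller provides the full
 * create_flow_group_in mailbox (match criteria, start/end flow index);
 * this helper only fills in the opcode, table addressing and vport fields
 * before issuing the command, and returns the new group id in @group_id.
 */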
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       u32 *in,
			       unsigned int *group_id)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec_check_status(dev, in,
					 inlen, out,
					 sizeof(out));
	if (!err)
		*group_id = MLX5_GET(create_flow_group_out, out,
				     group_id);

	return err;
}

int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
				struct mlx5_flow_table *ft,
				unsigned int group_id)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}

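/*
 * Write one flow table entry (FTE) with the SET_FLOW_TABLE_ENTRY command.
 * The inbox is sized for the base layout plus one dest_format_struct per
 * destination hanging off the FTE.  Forwarding destinations (flow table or
 * TIR) and flow counters share the same destination array in the flow
 * context, so the FTE's children are walked in two passes: first the
 * forward destinations, then the counter ids.
 */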
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned int group_id,
			    struct fs_fte *fte)
{
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	u32 out[MLX5_ST_SZ_DW(set_fte_out)];
	struct mlx5_flow_rule *dst;
	void *in_flow_context;
	void *in_match_value;
	void *in_dests;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
	MLX5_SET(flow_context, in_flow_context, action, fte->action);
	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id;

			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 dst->dest_attr.type);
			if (dst->dest_attr.type ==
			    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
				id = dst->dest_attr.ft->id;
			} else {
				id = dst->dest_attr.tir_num;
			}
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter->id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
					 sizeof(out));
	kvfree(in);

	return err;
}

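/* Install a new FTE: op_mod 0, no modify mask. */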
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned int group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

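/*
 * Modify an existing FTE in place (op_mod 1).  This is only legal when the
 * device reports the flow_modify_en capability for the NIC receive flow
 * table; otherwise the update is rejected with -ENOTSUPP.
 */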
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned int group_id,
			int modify_mask,
			struct fs_fte *fte)
{
	int opmod;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -ENOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}

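/* Remove the FTE at @index from @ft with DELETE_FLOW_TABLE_ENTRY. */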
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned int index)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, index);
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));

	return err;
}

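/* Allocate a hardware flow counter and return its id in @id. */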
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		return err;

	*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);

	return 0;
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}

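/*
 * Query a single flow counter.  The out mailbox holds the fixed
 * query_flow_counter_out header followed by one traffic_counter entry,
 * from which the packet and octet counts are extracted.
 */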
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)];
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
	void *stats;
	int err = 0;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);

	return 0;
}

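/*
 * Scratch buffer for a bulk flow counter query: @id is the first counter
 * id in the range, @num the number of counters, and @out receives the raw
 * QUERY_FLOW_COUNTER output mailbox.
 */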
struct mlx5_cmd_fc_bulk {
	u16 id;
	int num;
	int outlen;
	u32 out[0];
};

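/*
 * Allocate a bulk query buffer covering @num consecutive counters starting
 * at @id.  Returns NULL on allocation failure.
 */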
struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
{
	struct mlx5_cmd_fc_bulk *b;
	int outlen = sizeof(*b) +
		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * num;

	b = kzalloc(outlen, GFP_KERNEL);
	if (!b)
		return NULL;

	b->id = id;
	b->num = num;
	b->outlen = outlen;

	return b;
}

void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}

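/*
 * Issue one QUERY_FLOW_COUNTER command covering the whole counter range
 * described by @b; the raw statistics land in b->out for later parsing by
 * mlx5_cmd_fc_bulk_get().
 */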
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  b->out, b->outlen);
}

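/*
 * Extract the packet and octet counts for counter @id from a previously
 * executed bulk query.  Ids outside the queried range are warned about and
 * ignored.
 */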
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
			  struct mlx5_cmd_fc_bulk *b, u16 id,
			  u64 *packets, u64 *bytes)
{
	int index = id - b->id;
	void *stats;

	if (index < 0 || index >= b->num) {
		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
			       id, b->id, b->id + b->num - 1);
		return;
	}

	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
			     flow_statistics[index]);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
}