drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include "en.h"
#include "en_tc.h"

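/* One offloaded TC flower rule: the cls_flower cookie is the rhashtable
 * key, @rule is the mlx5 flow rule installed in hardware for it.
 */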
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_rule	*rule;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

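/* Install a single flow rule in the NIC TC flow table. The table is
 * created on first use; if adding the rule fails, a table created here
 * is destroyed again. When the action requests counting but no
 * forwarding, a dedicated flow counter becomes the rule destination.
 */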
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
						    struct mlx5_flow_spec *spec,
						    u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_rule *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
				  action, flow_tag,
				  &dest);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

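/* Remove a rule from hardware, release its counter (if any), and tear
 * down the TC flow table once the last offloaded filter is gone.
 */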
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_rule *rule)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	mlx5_del_flow_rule(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

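/* Translate the flower match (flow dissector keys) into an mlx5 flow spec.
 * Only outer L2/L3/L4 headers are handled; any other dissector key is
 * rejected with -EOPNOTSUPP so the rule is not offloaded.
 */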
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

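/* Translate the TC actions into an mlx5 flow context action and flow tag.
 * A single action per rule is supported: either gact drop (with a hardware
 * counter when the device offers one) or skbedit mark, whose value becomes
 * the flow tag reported with received packets.
 */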
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tc_for_each_action(a, exts) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

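/* Add or replace the flower classifier identified by f->cookie. On replace,
 * the old hardware rule is removed only after the new one has been
 * installed successfully.
 */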
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	u32 flow_tag;
	u32 action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_rule *old = NULL;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow)
		old = flow->rule;
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
	if (err < 0)
		goto err_free;

	flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (old)
		mlx5e_tc_del_flow(priv, old);

	goto out;

err_del_rule:
	mlx5_del_flow_rule(flow->rule);

err_free:
	if (!old)
		kfree(flow);
out:
	kvfree(spec);
	return err;
}

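/* Remove the offloaded flow matching f->cookie, if any. */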
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule);

	kfree(flow);

	return 0;
}

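/* Feed the cached hardware counter values (bytes, packets, last use) of an
 * offloaded flow back into the stats of its TC actions.
 */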
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tc_for_each_action(a, f->exts)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

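/* Offloaded flows are hashed by the u64 TC cookie stored in struct
 * mlx5e_tc_flow.
 */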
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

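/* Set up the flow hashtable; the hardware TC flow table itself is created
 * lazily when the first rule is offloaded.
 */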
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

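/* rhashtable_free_and_destroy() callback: release one leftover flow. */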
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule);
	kfree(flow);
}

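/* Remove any flows still offloaded and destroy the TC flow table. */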
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}