Merge remote-tracking branch 'battery/for-next'
[deliverable/linux.git] / drivers / net / ethernet / rocker / rocker_ofdpa.c
CommitLineData
e420114e
JP
1/*
2 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
3 * implementation
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
3fbcdbf3
JP
14#include <linux/types.h>
15#include <linux/spinlock.h>
16#include <linux/hashtable.h>
17#include <linux/crc32.h>
18#include <linux/netdevice.h>
19#include <linux/inetdevice.h>
20#include <linux/if_vlan.h>
21#include <linux/if_bridge.h>
22#include <net/neighbour.h>
23#include <net/switchdev.h>
24#include <net/ip_fib.h>
25#include <net/arp.h>
26
e420114e 27#include "rocker.h"
3fbcdbf3
JP
28#include "rocker_tlv.h"
29
/* Hash-table key for an OF-DPA flow entry.  tbl_id selects which union
 * member is meaningful; priority orders matches within a table.  The key
 * is hashed with crc32() over its first key_len bytes, so entries are
 * allocated zeroed (ofdpa_kzalloc) to keep unused union bytes
 * deterministic.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;		/* frame arrived with no tag */
			__be16 new_vlan_id;	/* tag to assign when untagged */
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;		/* IPv4 destination (network order) */
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst[] is valid */
			int has_eth_dst_mask;	/* eth_dst_mask[] is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;		/* full TOS byte: DSCP + ECN */
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
94
/* One flow-table entry.  Hashed by key_crc32; identified to the device
 * by cookie.  key_len == 0 means "hash/compare the whole key"; a
 * non-zero value limits comparison to a key prefix (see
 * ofdpa_flow_tbl_find()).
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;
	struct ofdpa_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};
103
/* One group-table entry, keyed by group_id.  Which union member is in
 * use follows from ROCKER_GROUP_TYPE_GET(group_id).  group_ids[] is a
 * separately allocated list of member group ids used by the flood/mcast
 * group types.
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_{ADD,MOD,DEL} */
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;	/* strip the VLAN tag on egress */
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* next (lower-level) group */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;	/* next (lower-level) group */
		} l3_unicast;
	};
};
129
/* One FDB (MAC learning) entry, keyed by crc32 of {port, addr, vlan}. */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* dynamically learned vs. user-added */
	/* last-activity time in jiffies; presumably consumed by
	 * fdb_cleanup_timer for ageing -- TODO confirm against the
	 * cleanup code later in this file.
	 */
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
141
/* Maps a netdev ifindex to the internal VLAN id assigned to it;
 * refcounted so multiple users of the same ifindex share one id.
 */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
148
/* One IPv4 neighbour entry, keyed by ip_addr and refcounted by its
 * users (e.g. routes resolving through it).
 */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;		/* allocated from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];	/* neighbour's resolved MAC address */
	bool ttl_check;
};
158
/* Classes of control traffic tracked per port in ofdpa_port->ctrls[];
 * OFDPA_CTRL_MAX sizes that array.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};
168
/* Internal VLAN ids are carved out of the VLAN space starting at 0x0f00
 * (255 of them); vid 0 means "untagged".  See ofdpa_vlan_id_is_internal().
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN	BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
174
/* Per-switch OF-DPA state: software shadows of the device's flow,
 * group, FDB, internal-VLAN and neighbour tables.  Each table has its
 * own spinlock, taken with spin_lock_irqsave() by the accessors below.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock; /* for flow tbl accesses */
	u64 flow_tbl_next_cookie;	/* next device cookie to hand out */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock; /* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
};
193
/* Per-port OF-DPA state. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;			/* physical port number on the device */
	struct net_device *bridge_dev;	/* master device (bridge/ovs) or NULL */
	__be16 internal_vlan_id;	/* VLAN standing in for untagged traffic */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];	/* enabled control-traffic classes */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
207
/* Well-known MAC addresses and match masks used when building control
 * (ACL) and bridging entries, e.g. IEEE link-local multicast and the
 * IPv4/IPv6 multicast MAC prefixes.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
217
/* Rocker priority levels for flow table entries. Higher
 * priority match takes precedence over lower priority match.
 * Values only compete within the same table, so equal numbers
 * across different tables are unrelated.
 */

enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
238
239static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
240{
241 u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
242 u16 end = 0xffe;
243 u16 _vlan_id = ntohs(vlan_id);
244
245 return (_vlan_id >= start && _vlan_id <= end);
246}
247
248static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
249 u16 vid, bool *pop_vlan)
250{
251 __be16 vlan_id;
252
253 if (pop_vlan)
254 *pop_vlan = false;
255 vlan_id = htons(vid);
256 if (!vlan_id) {
257 vlan_id = ofdpa_port->internal_vlan_id;
258 if (pop_vlan)
259 *pop_vlan = true;
260 }
261
262 return vlan_id;
263}
264
265static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
266 __be16 vlan_id)
267{
268 if (ofdpa_vlan_id_is_internal(vlan_id))
269 return 0;
270
271 return ntohs(vlan_id);
272}
273
274static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
275 const char *kind)
276{
277 return ofdpa_port->bridge_dev &&
278 !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
279}
280
281static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
282{
283 return ofdpa_port_is_slave(ofdpa_port, "bridge");
284}
285
286static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
287{
288 return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
289}
290
/* Flags modifying the table operations below:
 *  REMOVE  - delete the entry instead of adding/modifying it
 *  NOWAIT  - atomic context: GFP_ATOMIC allocations, no-wait cmd exec
 *  LEARNED - FDB bookkeeping hint (entry was hw-learned); consumer is
 *            later in this file
 *  REFRESH - FDB bookkeeping hint (refresh only); consumer is later in
 *            this file
 */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)
#define OFDPA_OP_FLAG_LEARNED		BIT(2)
#define OFDPA_OP_FLAG_REFRESH		BIT(3)
295
296static bool ofdpa_flags_nowait(int flags)
297{
298 return flags & OFDPA_OP_FLAG_NOWAIT;
299}
300
/* Common allocator behind ofdpa_kzalloc()/ofdpa_kcalloc().  Allocates
 * @size bytes of zeroed memory, honouring switchdev transaction
 * semantics.  Returns NULL on allocation failure (or if nothing was
 * queued to dequeue in the commit phase).  A switchdev_trans_item
 * header is placed in front of the returned memory; ofdpa_kfree()
 * relies on that layout.
 */
static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
			       size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	/* NOWAIT callers run in atomic context, so no sleeping allocation */
	gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction. If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory. The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	/* Caller's memory starts just past the trans item header */
	return elem ? elem + 1 : NULL;
}
330
/* kzalloc() analog with switchdev transaction semantics; see
 * __ofdpa_mem_alloc().
 */
static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
			   size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, size);
}
336
337static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
338 size_t n, size_t size)
339{
340 return __ofdpa_mem_alloc(trans, flags, n * size);
341}
342
/* Free memory obtained from ofdpa_kzalloc()/ofdpa_kcalloc().  @mem
 * points just past the switchdev_trans_item header laid down by
 * __ofdpa_mem_alloc(), so step back one header before kfree().
 */
static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase. The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
358
359/*************************************************************
360 * Flow, group, FDB, internal VLAN and neigh command prepares
361 *************************************************************/
362
/* Emit the match/action TLVs for an ingress-port table entry into the
 * command descriptor.  Returns -EMSGSIZE if the descriptor is full.
 * TLV emission order is part of the device command format: do not
 * reorder.
 */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
379
/* Emit the match/action TLVs for a VLAN table entry.  The NEW_VLAN_ID
 * TLV is only sent for untagged traffic, where a tag is assigned.
 * Returns -EMSGSIZE if the descriptor is full.
 */
static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
403
/* Emit the match/action TLVs for a termination-MAC table entry.
 * COPY_CPU_ACTION is only sent when set.  Returns -EMSGSIZE if the
 * descriptor is full.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
439
/* Emit the match/action TLVs for a unicast-routing table entry.
 * Returns -EMSGSIZE if the descriptor is full.
 */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
462
/* Emit the match/action TLVs for a bridging table entry.  Optional
 * fields (dst MAC, its mask, vlan, tunnel, copy-to-cpu) are only sent
 * when present/set.  Returns -EMSGSIZE if the descriptor is full.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
496
/* Emit the match/action TLVs for an ACL-policy table entry.  For IPv4
 * and IPv6 ethertypes the TOS byte is split into its DSCP (low 6 bits)
 * and ECN (top 2 bits) TLVs.  GROUP_ID is omitted for drop entries
 * (ROCKER_GROUP_NONE).  Returns -EMSGSIZE if the descriptor is full.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		/* DSCP: low 6 bits of the TOS byte */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		/* ECN: top 2 bits of the TOS byte */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
563
/* rocker_cmd_exec() prepare callback: build a complete flow add/mod
 * command in @desc_info from the entry passed via @priv.  Emits the
 * common header TLVs, then dispatches on table id for the per-table
 * match/action TLVs.  On any error the whole descriptor is abandoned
 * by the caller, so the open CMD_INFO nest is not explicitly
 * cancelled.  NOTE(review): that last point is inferred from the
 * unconditional early returns here -- confirm against rocker_cmd_exec().
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hard timeout 0: entries never expire on their own */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
620
/* rocker_cmd_exec() prepare callback: build a flow delete command.
 * Deletion only needs the cookie that identified the entry at add time.
 */
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
640
/* Emit the TLVs for an L2-interface group: the output pport is encoded
 * inside the group id itself.  Returns -EMSGSIZE if the descriptor is
 * full.
 */
static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}
654
/* Emit the TLVs for an L2-rewrite group: the chained (lower) group id
 * plus whichever of src/dst MAC and vlan are set.  Returns -EMSGSIZE
 * if the descriptor is full.
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
677
/* Emit the member-group list for flood/mcast groups: a count TLV
 * followed by a nest of group ids.  Returns -EMSGSIZE if the
 * descriptor is full.
 */
static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
703
/* Emit the TLVs for an L3-unicast group: optional src/dst MAC rewrite
 * and vlan, the TTL-check flag, and the chained (lower) group id.
 * Returns -EMSGSIZE if the descriptor is full.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
729
/* rocker_cmd_exec() prepare callback: build a group add/mod command.
 * The per-type TLVs are chosen by the type bits embedded in group_id.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
774
/* rocker_cmd_exec() prepare callback: build a group delete command.
 * Deletion needs only the group id.
 */
static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
794
795/***************************************************
796 * Flow, group, FDB, internal VLAN and neigh tables
797 ***************************************************/
798
/* Look up a flow entry matching @match's key (by crc32 bucket plus
 * memcmp over key_len bytes, or the whole key when key_len is 0).
 * Caller must hold ofdpa->flow_tbl_lock and must have filled in
 * match->key_crc32.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
814
/* Add (or, if an entry with the same key exists, modify) a flow entry.
 * Takes ownership of @match: it replaces any existing entry in the
 * shadow table (the old one is freed) and is then pushed to the device
 * as a FLOW_ADD or FLOW_MOD.  In the transaction prepare phase the
 * shadow table is left untouched and no command is issued.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* Reuse the device cookie so this becomes a modify */
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}
855
/* Delete the flow entry matching @match's key.  @match itself is only
 * a key carrier and is always freed here.  If a matching entry exists
 * it is unhashed, a FLOW_DEL command is issued for it, and it is freed.
 * Deleting a non-existent entry is not an error (returns 0).
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	ofdpa_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_flow_tbl_del,
					      found, NULL, NULL);
		ofdpa_kfree(trans, found);
	}

	return err;
}
893
894static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
895 struct switchdev_trans *trans, int flags,
896 struct ofdpa_flow_tbl_entry *entry)
897{
898 if (flags & OFDPA_OP_FLAG_REMOVE)
899 return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
900 else
901 return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
902}
903
/* Build and apply an ingress-port table entry sending matching pports
 * to @goto_tbl.  The entry is zero-allocated so unset key fields stay
 * deterministic for hashing.
 */
static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
923
/* Build and apply a VLAN table entry; for untagged traffic
 * (@untagged) the device will assign @new_vlan_id.
 */
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
949
/* Build and apply a termination-MAC table entry.  Multicast
 * destinations are steered to the multicast routing table at mcast
 * priority; unicast ones to the unicast routing table.  (Note: unlike
 * its siblings this function takes @flags as the last parameter --
 * kept for interface compatibility.)
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
986
987static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
988 struct switchdev_trans *trans, int flags,
989 const u8 *eth_dst, const u8 *eth_dst_mask,
990 __be16 vlan_id, u32 tunnel_id,
991 enum rocker_of_dpa_table_id goto_tbl,
992 u32 group_id, bool copy_to_cpu)
993{
994 struct ofdpa_flow_tbl_entry *entry;
995 u32 priority;
996 bool vlan_bridging = !!vlan_id;
997 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
998 bool wild = false;
999
1000 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1001 if (!entry)
1002 return -ENOMEM;
1003
1004 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1005
1006 if (eth_dst) {
1007 entry->key.bridge.has_eth_dst = 1;
1008 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
1009 }
1010 if (eth_dst_mask) {
1011 entry->key.bridge.has_eth_dst_mask = 1;
1012 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
1013 if (!ether_addr_equal(eth_dst_mask, ff_mac))
1014 wild = true;
1015 }
1016
1017 priority = OFDPA_PRIORITY_UNKNOWN;
1018 if (vlan_bridging && dflt && wild)
1019 priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
1020 else if (vlan_bridging && dflt && !wild)
1021 priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
1022 else if (vlan_bridging && !dflt)
1023 priority = OFDPA_PRIORITY_BRIDGING_VLAN;
1024 else if (!vlan_bridging && dflt && wild)
1025 priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
1026 else if (!vlan_bridging && dflt && !wild)
1027 priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
1028 else if (!vlan_bridging && !dflt)
1029 priority = OFDPA_PRIORITY_BRIDGING_TENANT;
1030
1031 entry->key.priority = priority;
1032 entry->key.bridge.vlan_id = vlan_id;
1033 entry->key.bridge.tunnel_id = tunnel_id;
1034 entry->key.bridge.goto_tbl = goto_tbl;
1035 entry->key.bridge.group_id = group_id;
1036 entry->key.bridge.copy_to_cpu = copy_to_cpu;
1037
1038 return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
1039}
1040
/* Add/modify/delete an IPv4 unicast routing flow entry matching
 * dst/dst_mask, directing hits to group_id and then goto_tbl.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 struct switchdev_trans *trans,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	/* Trim the compare/hash key so it stops just before group_id:
	 * a nexthop (group) change then modifies the existing entry
	 * rather than creating a duplicate with a different key.
	 */
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
1066
/* Add/modify/delete an ACL policy table flow entry.  Any of the
 * eth_src/eth_dst pointers (and their masks) may be NULL, in which
 * case that field is left zeroed (i.e. not matched).
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	/* Multicast-masked dest matches get the lowest (default)
	 * priority; link-local (control) frames the highest.
	 */
	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
1117
1118static struct ofdpa_group_tbl_entry *
1119ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1120 const struct ofdpa_group_tbl_entry *match)
1121{
1122 struct ofdpa_group_tbl_entry *found;
1123
1124 hash_for_each_possible(ofdpa->group_tbl, found,
1125 entry, match->group_id) {
1126 if (found->group_id == match->group_id)
1127 return found;
1128 }
1129
1130 return NULL;
1131}
1132
1133static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
1134 struct ofdpa_group_tbl_entry *entry)
1135{
1136 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1137 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1138 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1139 ofdpa_kfree(trans, entry->group_ids);
1140 break;
1141 default:
1142 break;
1143 }
1144 ofdpa_kfree(trans, entry);
1145}
1146
/* Add (or modify, if already present) a group table entry.  Takes
 * ownership of @match: it becomes the hashed entry, and any previous
 * entry with the same group_id is freed.  During the switchdev
 * prepare phase the hash table is left untouched and no command is
 * issued to the hardware.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* Replace existing entry with @match; issue a MOD cmd */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* Hardware command is sent outside the spinlock; commit phase only */
	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}
1182
/* Delete a group table entry matching @match->group_id.  @match is
 * only a lookup key and is always freed here; the found entry (if
 * any) is unhashed, sent to hardware as a DEL command, then freed.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* The lookup key is no longer needed regardless of outcome */
	ofdpa_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_group_tbl_del,
					      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(trans, found);
	}

	return err;
}
1217
1218static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
1219 struct switchdev_trans *trans, int flags,
1220 struct ofdpa_group_tbl_entry *entry)
1221{
1222 if (flags & OFDPA_OP_FLAG_REMOVE)
1223 return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
1224 else
1225 return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
1226}
1227
1228static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1229 struct switchdev_trans *trans, int flags,
1230 __be16 vlan_id, u32 out_pport,
1231 int pop_vlan)
1232{
1233 struct ofdpa_group_tbl_entry *entry;
1234
1235 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1236 if (!entry)
1237 return -ENOMEM;
1238
1239 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1240 entry->l2_interface.pop_vlan = pop_vlan;
1241
1242 return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1243}
1244
1245static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1246 struct switchdev_trans *trans,
1247 int flags, u8 group_count,
1248 const u32 *group_ids, u32 group_id)
1249{
1250 struct ofdpa_group_tbl_entry *entry;
1251
1252 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1253 if (!entry)
1254 return -ENOMEM;
1255
1256 entry->group_id = group_id;
1257 entry->group_count = group_count;
1258
1259 entry->group_ids = ofdpa_kcalloc(trans, flags,
1260 group_count, sizeof(u32));
1261 if (!entry->group_ids) {
1262 ofdpa_kfree(trans, entry);
1263 return -ENOMEM;
1264 }
1265 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1266
1267 return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1268}
1269
1270static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1271 struct switchdev_trans *trans, int flags,
1272 __be16 vlan_id, u8 group_count,
1273 const u32 *group_ids, u32 group_id)
1274{
1275 return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
1276 group_count, group_ids,
1277 group_id);
1278}
1279
1280static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
1281 struct switchdev_trans *trans, int flags,
1282 u32 index, const u8 *src_mac, const u8 *dst_mac,
1283 __be16 vlan_id, bool ttl_check, u32 pport)
1284{
1285 struct ofdpa_group_tbl_entry *entry;
1286
1287 entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1288 if (!entry)
1289 return -ENOMEM;
1290
1291 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1292 if (src_mac)
1293 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1294 if (dst_mac)
1295 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1296 entry->l3_unicast.vlan_id = vlan_id;
1297 entry->l3_unicast.ttl_check = ttl_check;
1298 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1299
1300 return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1301}
1302
1303static struct ofdpa_neigh_tbl_entry *
1304ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1305{
1306 struct ofdpa_neigh_tbl_entry *found;
1307
1308 hash_for_each_possible(ofdpa->neigh_tbl, found,
1309 entry, be32_to_cpu(ip_addr))
1310 if (found->ip_addr == ip_addr)
1311 return found;
1312
1313 return NULL;
1314}
1315
1316static void ofdpa_neigh_add(struct ofdpa *ofdpa,
1317 struct switchdev_trans *trans,
1318 struct ofdpa_neigh_tbl_entry *entry)
1319{
1320 if (!switchdev_trans_ph_commit(trans))
1321 entry->index = ofdpa->neigh_tbl_next_index++;
1322 if (switchdev_trans_ph_prepare(trans))
1323 return;
1324 entry->ref_count++;
1325 hash_add(ofdpa->neigh_tbl, &entry->entry,
1326 be32_to_cpu(entry->ip_addr));
1327}
1328
/* Drop one reference on a neighbor entry; unhash and free it when the
 * last reference goes away.  No-op during the prepare phase.
 */
static void ofdpa_neigh_del(struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		ofdpa_kfree(trans, entry);
	}
}
1339
/* Update an existing neighbor entry.  With a new @eth_dst the MAC and
 * ttl_check are refreshed in place; with a NULL @eth_dst the call is a
 * ref-count take instead (used by the nexthop path), skipped during
 * the prepare phase.
 */
static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
			       struct switchdev_trans *trans,
			       const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}
1351
/* Install or remove the hardware state for an IPv4 neighbor: an L3
 * unicast group for its MAC plus a /32 unicast route pointing at that
 * group.  Classifies the request into add/update/remove based on
 * whether the neighbor is already known.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	/* Allocated up front so no allocation happens under the lock;
	 * for update/remove it holds a snapshot of the found entry.
	 */
	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, trans, entry);
	} else if (removing) {
		/* Snapshot before ofdpa_neigh_del() may free @found */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	/* The /32 route only changes on add/remove; a MAC update keeps
	 * the same group_id so the route is untouched.
	 */
	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* When adding, @entry was inserted into the table and is owned
	 * by it; otherwise it was only a scratch copy.
	 */
	if (!adding)
		ofdpa_kfree(trans, entry);

	return err;
}
1441
/* Resolve @ip_addr to a MAC via the kernel neighbor subsystem.  If
 * the neigh entry is already valid, install the neighbor in hardware;
 * otherwise kick off ARP resolution and rely on a later notification.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
1470
/* Take/drop a nexthop reference on the neighbor entry for @ip_addr,
 * returning its L3 group index via @index.  If the neighbor does not
 * exist yet, a placeholder entry is created and ARP resolution is
 * started.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	/* Allocated up front so no allocation happens under the lock */
	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		/* New placeholder entry: MAC unknown, needs resolving */
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		/* NULL eth_dst: this just takes an extra reference */
		ofdpa_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* When adding, @entry is now owned by the neigh table */
	if (!adding)
		ofdpa_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);

	return err;
}
1529
1530static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1531 int port_index)
1532{
1533 struct rocker_port *rocker_port;
1534
1535 rocker_port = ofdpa->rocker->ports[port_index];
1536 return rocker_port ? rocker_port->wpriv : NULL;
1537}
1538
/* Rebuild the L2 flood group for @vlan_id so it references an L2
 * interface group for every bridged port that is a member of the
 * VLAN.  If no bridged port is a member, nothing is programmed.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       struct switchdev_trans *trans,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	ofdpa_kfree(trans, group_ids);
	return err;
}
1586
/* Maintain the L2 interface groups for this port's VLAN membership:
 * one group for the port itself (only in LEARNING/FORWARDING STP
 * state) and a shared group toward the CPU port, created when the
 * first port joins the VLAN and destroyed when the last one leaves.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* Count current VLAN members across all ports */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* Proceed only when adding the first member (ref == 1, bitmap
	 * already updated by the caller) or removing the last (ref == 0).
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1640
/* Table of control-traffic classes the driver programs per port/VLAN.
 * Each entry describes the dest-MAC (+mask) and ethertype to match,
 * and which table the match belongs in: ACL policy (.acl), bridging
 * flood (.bridge), or termination MAC (.term).
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;	/* dest MAC to match (NULL = none) */
	const u8 *eth_dst_mask;	/* mask applied to eth_dst */
	__be16 eth_type;	/* ethertype to match (0 = any) */
	bool acl;		/* program via ACL policy table */
	bool bridge;		/* program via bridging (flood) table */
	bool term;		/* program via termination MAC table */
	bool copy_to_cpu;	/* also deliver matching pkts to CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1691
1692static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
1693 struct switchdev_trans *trans, int flags,
1694 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1695{
1696 u32 in_pport = ofdpa_port->pport;
1697 u32 in_pport_mask = 0xffffffff;
1698 u32 out_pport = 0;
1699 const u8 *eth_src = NULL;
1700 const u8 *eth_src_mask = NULL;
1701 __be16 vlan_id_mask = htons(0xffff);
1702 u8 ip_proto = 0;
1703 u8 ip_proto_mask = 0;
1704 u8 ip_tos = 0;
1705 u8 ip_tos_mask = 0;
1706 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1707 int err;
1708
1709 err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
1710 in_pport, in_pport_mask,
1711 eth_src, eth_src_mask,
1712 ctrl->eth_dst, ctrl->eth_dst_mask,
1713 ctrl->eth_type,
1714 vlan_id, vlan_id_mask,
1715 ip_proto, ip_proto_mask,
1716 ip_tos, ip_tos_mask,
1717 group_id);
1718
1719 if (err)
1720 netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1721
1722 return err;
1723}
1724
1725static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1726 struct switchdev_trans *trans,
1727 int flags,
1728 const struct ofdpa_ctrl *ctrl,
1729 __be16 vlan_id)
1730{
1731 enum rocker_of_dpa_table_id goto_tbl =
1732 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1733 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1734 u32 tunnel_id = 0;
1735 int err;
1736
1737 if (!ofdpa_port_is_bridged(ofdpa_port))
1738 return 0;
1739
1740 err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
1741 ctrl->eth_dst, ctrl->eth_dst_mask,
1742 vlan_id, tunnel_id,
1743 goto_tbl, group_id, ctrl->copy_to_cpu);
1744
1745 if (err)
1746 netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1747
1748 return err;
1749}
1750
1751static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
1752 struct switchdev_trans *trans, int flags,
1753 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1754{
1755 u32 in_pport_mask = 0xffffffff;
1756 __be16 vlan_id_mask = htons(0xffff);
1757 int err;
1758
1759 if (ntohs(vlan_id) == 0)
1760 vlan_id = ofdpa_port->internal_vlan_id;
1761
1762 err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
1763 ofdpa_port->pport, in_pport_mask,
1764 ctrl->eth_type, ctrl->eth_dst,
1765 ctrl->eth_dst_mask, vlan_id,
1766 vlan_id_mask, ctrl->copy_to_cpu,
1767 flags);
1768
1769 if (err)
1770 netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1771
1772 return err;
1773}
1774
1775static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
1776 struct switchdev_trans *trans, int flags,
1777 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1778{
1779 if (ctrl->acl)
1780 return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
1781 ctrl, vlan_id);
1782 if (ctrl->bridge)
1783 return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
1784 ctrl, vlan_id);
1785
1786 if (ctrl->term)
1787 return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
1788 ctrl, vlan_id);
1789
1790 return -EOPNOTSUPP;
1791}
1792
1793static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
1794 struct switchdev_trans *trans, int flags,
1795 __be16 vlan_id)
1796{
1797 int err = 0;
1798 int i;
1799
1800 for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1801 if (ofdpa_port->ctrls[i]) {
1802 err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1803 &ofdpa_ctrls[i], vlan_id);
1804 if (err)
1805 return err;
1806 }
1807 }
1808
1809 return err;
1810}
1811
1812static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
1813 struct switchdev_trans *trans, int flags,
1814 const struct ofdpa_ctrl *ctrl)
1815{
1816 u16 vid;
1817 int err = 0;
1818
1819 for (vid = 1; vid < VLAN_N_VID; vid++) {
1820 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1821 continue;
1822 err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1823 ctrl, htons(vid));
1824 if (err)
1825 break;
1826 }
1827
1828 return err;
1829}
1830
/* Add or remove this port's membership in VLAN @vid: flips the
 * vlan_bitmap bit, installs/removes the per-VLAN ctrl entries, the L2
 * groups, the flood group, and the VLAN table flow.  During the
 * switchdev prepare phase the bitmap change is reverted at the end so
 * the commit phase starts from the original state.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	/* vid 0 maps to the port's internal VLAN (untagged traffic) */
	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

err_out:
	/* Prepare phase must leave the bitmap unmodified */
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	return err;
}
1890
1891static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
1892 struct switchdev_trans *trans, int flags)
1893{
1894 enum rocker_of_dpa_table_id goto_tbl;
1895 u32 in_pport;
1896 u32 in_pport_mask;
1897 int err;
1898
1899 /* Normal Ethernet Frames. Matches pkts from any local physical
1900 * ports. Goto VLAN tbl.
1901 */
1902
1903 in_pport = 0;
1904 in_pport_mask = 0xffff0000;
1905 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1906
1907 err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
1908 in_pport, in_pport_mask,
1909 goto_tbl);
1910 if (err)
1911 netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1912
1913 return err;
1914}
1915
/* Deferred-work context for notifying the bridge layer about a
 * learned/forgotten FDB entry outside of atomic context.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;	/* port the address was seen on */
	struct switchdev_trans *trans;	/* transaction the work belongs to */
	int flags;			/* OFDPA_OP_FLAG_* of the event */
	u8 addr[ETH_ALEN];		/* learned MAC address */
	u16 vid;			/* VLAN the address was learned in */
};
1924
/* Work handler: notify the switchdev/bridge layer of a learned FDB
 * add or delete.  Runs in process context so it may take rtnl_lock.
 * Only hardware-learned entries generate notifications.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->ofdpa_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->ofdpa_port->dev, &info.info);
	rtnl_unlock();

	ofdpa_kfree(lw->trans, work);
}
1947
/* Program the bridging flow for a (addr, vlan) FDB entry and, when
 * learning sync is enabled on a bridged port, schedule deferred work
 * to notify the bridge layer.  With OFDPA_OP_FLAG_REFRESH only the
 * notification is (re)sent; the flow is left as-is.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	/* Forward to the port's L2 interface group only when bridged */
	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* No side effects during the prepare phase: drop the work item */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
1998
1999static struct ofdpa_fdb_tbl_entry *
2000ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
2001 const struct ofdpa_fdb_tbl_entry *match)
2002{
2003 struct ofdpa_fdb_tbl_entry *found;
2004
2005 hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
2006 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2007 return found;
2008
2009 return NULL;
2010}
2011
/* Add, refresh or remove a software FDB entry for (addr, vlan) and
 * propagate the change to hardware via ofdpa_port_fdb_learn().
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  struct switchdev_trans *trans,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	/* CRC over the whole key is used as the hash bucket index */
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			ofdpa_kfree(trans, fdb);
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(ofdpa->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		/* Either case: @fdb was not inserted above, so free it */
		ofdpa_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
}
2064
/* Remove every hardware-learned FDB entry belonging to this port,
 * both from hardware and from the software table.  Skipped while the
 * port is in LEARNING/FORWARDING state (entries are still live).
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
2102
/* Timer callback: age out learned FDB entries whose last touch is
 * older than the owning port's ageing time, then re-arm the timer for
 * the earliest upcoming expiry (defaulting to one full ageing period).
 */
static void ofdpa_fdb_cleanup(unsigned long data)
{
	struct ofdpa *ofdpa = (struct ofdpa *)data;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		/* Only hardware-learned entries age out */
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, NULL,
					     flags, entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			/* Track the soonest remaining expiry */
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2137
2138static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2139 struct switchdev_trans *trans, int flags,
2140 __be16 vlan_id)
2141{
2142 u32 in_pport_mask = 0xffffffff;
2143 __be16 eth_type;
2144 const u8 *dst_mac_mask = ff_mac;
2145 __be16 vlan_id_mask = htons(0xffff);
2146 bool copy_to_cpu = false;
2147 int err;
2148
2149 if (ntohs(vlan_id) == 0)
2150 vlan_id = ofdpa_port->internal_vlan_id;
2151
2152 eth_type = htons(ETH_P_IP);
2153 err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2154 ofdpa_port->pport, in_pport_mask,
2155 eth_type, ofdpa_port->dev->dev_addr,
2156 dst_mac_mask, vlan_id, vlan_id_mask,
2157 copy_to_cpu, flags);
2158 if (err)
2159 return err;
2160
2161 eth_type = htons(ETH_P_IPV6);
2162 err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2163 ofdpa_port->pport, in_pport_mask,
2164 eth_type, ofdpa_port->dev->dev_addr,
2165 dst_mac_mask, vlan_id, vlan_id_mask,
2166 copy_to_cpu, flags);
2167
2168 return err;
2169}
2170
2171static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
2172 struct switchdev_trans *trans, int flags)
2173{
2174 bool pop_vlan;
2175 u32 out_pport;
2176 __be16 vlan_id;
2177 u16 vid;
2178 int err;
2179
2180 /* Port will be forwarding-enabled if its STP state is LEARNING
2181 * or FORWARDING. Traffic from CPU can still egress, regardless of
2182 * port STP state. Use L2 interface group on port VLANs as a way
2183 * to toggle port forwarding: if forwarding is disabled, L2
2184 * interface group will not exist.
2185 */
2186
2187 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2188 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2189 flags |= OFDPA_OP_FLAG_REMOVE;
2190
2191 out_pport = ofdpa_port->pport;
2192 for (vid = 1; vid < VLAN_N_VID; vid++) {
2193 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2194 continue;
2195 vlan_id = htons(vid);
2196 pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2197 err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
2198 vlan_id, out_pport, pop_vlan);
2199 if (err) {
2200 netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2201 err, out_pport);
2202 return err;
2203 }
2204 }
2205
2206 return 0;
2207}
2208
/* Move the port to a new bridge STP state and (re)program the control
 * traffic traps, FDB and forwarding groups accordingly.
 *
 * Runs under the switchdev prepare/commit transaction model: in the
 * prepare phase the previous ctrls/state are snapshotted up front and
 * restored at the end, so the commit phase replays the same
 * transition for real.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
		prev_state = ofdpa_port->stp_state;
	}

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* decide which control-plane traps the new state needs */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* apply only the deltas between current and wanted traps */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
				(want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_out;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
	if (err)
		goto err_out;

	err = ofdpa_port_fwding(ofdpa_port, trans, flags);

err_out:
	/* the prepare phase must leave the port state untouched */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		ofdpa_port->stp_state = prev_state;
	}

	return err;
}
2278
2279static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2280{
2281 if (ofdpa_port_is_bridged(ofdpa_port))
2282 /* bridge STP will enable port */
2283 return 0;
2284
2285 /* port is not bridged, so simulate going to FORWARDING state */
2286 return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2287 BR_STATE_FORWARDING);
2288}
2289
2290static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2291{
2292 if (ofdpa_port_is_bridged(ofdpa_port))
2293 /* bridge STP will disable port */
2294 return 0;
2295
2296 /* port is not bridged, so simulate going to DISABLED state */
2297 return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2298 BR_STATE_DISABLED);
2299}
2300
2301static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2302 struct switchdev_trans *trans,
2303 u16 vid, u16 flags)
2304{
2305 int err;
2306
2307 /* XXX deal with flags for PVID and untagged */
2308
2309 err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
2310 if (err)
2311 return err;
2312
2313 err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
2314 if (err)
2315 ofdpa_port_vlan(ofdpa_port, trans,
2316 OFDPA_OP_FLAG_REMOVE, vid);
2317
2318 return err;
2319}
2320
2321static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2322 u16 vid, u16 flags)
2323{
2324 int err;
2325
2326 err = ofdpa_port_router_mac(ofdpa_port, NULL,
2327 OFDPA_OP_FLAG_REMOVE, htons(vid));
2328 if (err)
2329 return err;
2330
2331 return ofdpa_port_vlan(ofdpa_port, NULL,
2332 OFDPA_OP_FLAG_REMOVE, vid);
2333}
2334
2335static struct ofdpa_internal_vlan_tbl_entry *
2336ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2337{
2338 struct ofdpa_internal_vlan_tbl_entry *found;
2339
2340 hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2341 entry, ifindex) {
2342 if (found->ifindex == ifindex)
2343 return found;
2344 }
2345
2346 return NULL;
2347}
2348
/* Get (or create) the internal VLAN id used for untagged traffic on
 * the netdev identified by @ifindex, taking a reference on the entry.
 *
 * Returns 0 on allocation failure.  If the pool of internal VLAN ids
 * is exhausted, the entry is still installed and referenced but keeps
 * vlan_id == 0 (zeroed by kzalloc).
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* allocate up front so no GFP_KERNEL allocation happens under
	 * the spinlock; freed again if the ifindex is already present
	 */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* claim the first free id from the internal VLAN range */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2390
2391static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
2392 struct switchdev_trans *trans, __be32 dst,
2393 int dst_len, const struct fib_info *fi,
2394 u32 tb_id, int flags)
2395{
2396 const struct fib_nh *nh;
2397 __be16 eth_type = htons(ETH_P_IP);
2398 __be32 dst_mask = inet_make_mask(dst_len);
2399 __be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
2400 u32 priority = fi->fib_priority;
2401 enum rocker_of_dpa_table_id goto_tbl =
2402 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2403 u32 group_id;
2404 bool nh_on_port;
2405 bool has_gw;
2406 u32 index;
2407 int err;
2408
2409 /* XXX support ECMP */
2410
2411 nh = fi->fib_nh;
2412 nh_on_port = (fi->fib_dev == ofdpa_port->dev);
2413 has_gw = !!nh->nh_gw;
2414
2415 if (has_gw && nh_on_port) {
2416 err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
2417 nh->nh_gw, &index);
2418 if (err)
2419 return err;
2420
2421 group_id = ROCKER_GROUP_L3_UNICAST(index);
2422 } else {
2423 /* Send to CPU for processing */
2424 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
2425 }
2426
2427 err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
2428 dst_mask, priority, goto_tbl,
2429 group_id, flags);
2430 if (err)
2431 netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
2432 err, &dst);
2433
2434 return err;
2435}
2436
2437static void
2438ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
2439 int ifindex)
2440{
2441 struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2442 struct ofdpa_internal_vlan_tbl_entry *found;
2443 unsigned long lock_flags;
2444 unsigned long bit;
2445
2446 spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2447
2448 found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
2449 if (!found) {
2450 netdev_err(ofdpa_port->dev,
2451 "ifindex (%d) not found in internal VLAN tbl\n",
2452 ifindex);
2453 goto not_found;
2454 }
2455
2456 if (--found->ref_count <= 0) {
2457 bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
2458 clear_bit(bit, ofdpa->internal_vlan_bitmap);
2459 hash_del(&found->entry);
2460 kfree(found);
2461 }
2462
2463not_found:
2464 spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2465}
2466
2467/**********************************
2468 * Rocker world ops implementation
2469 **********************************/
2470
/* World init: initialize every software table and its lock, then arm
 * the periodic FDB ageing timer (fires immediately to compute its
 * first real deadline).
 */
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
		    (unsigned long) ofdpa);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	/* switch-wide ageing interval; lowered later to the minimum of
	 * the per-port settings
	 */
	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}
2500
/* World teardown: stop the ageing timer, then unlink every entry from
 * each software table under its lock.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* no more ageing callbacks after this point */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2542
/* Pre-init: wire up back-pointers and defaults in the per-port
 * private data before any hardware programming happens.
 */
static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
	ofdpa_port->rocker_port = rocker_port;
	ofdpa_port->dev = rocker_port->dev;
	ofdpa_port->pport = rocker_port->pport;
	/* learning (with sync to bridge) is on by default */
	ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
	return 0;
}
2555
2556static int ofdpa_port_init(struct rocker_port *rocker_port)
2557{
2558 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2559 int err;
2560
3fbcdbf3
JP
2561 rocker_port_set_learning(rocker_port,
2562 !!(ofdpa_port->brport_flags & BR_LEARNING));
2563
2564 err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
2565 if (err) {
2566 netdev_err(ofdpa_port->dev, "install ig port table failed\n");
2567 return err;
2568 }
2569
2570 ofdpa_port->internal_vlan_id =
2571 ofdpa_port_internal_vlan_id_get(ofdpa_port,
2572 ofdpa_port->dev->ifindex);
2573
2574 err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
2575 if (err) {
2576 netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
2577 goto err_untagged_vlan;
2578 }
2579 return 0;
2580
2581err_untagged_vlan:
2582 ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
2583 return err;
2584}
2585
2586static void ofdpa_port_fini(struct rocker_port *rocker_port)
2587{
2588 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2589
2590 ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
2591}
2592
2593static int ofdpa_port_open(struct rocker_port *rocker_port)
2594{
2595 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2596
2597 return ofdpa_port_fwd_enable(ofdpa_port, 0);
2598}
2599
2600static void ofdpa_port_stop(struct rocker_port *rocker_port)
2601{
2602 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2603
2604 ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2605}
2606
2607static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2608 u8 state,
2609 struct switchdev_trans *trans)
2610{
2611 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2612
2613 return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
2614}
2615
2616static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
2617 unsigned long brport_flags,
2618 struct switchdev_trans *trans)
2619{
2620 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2621 unsigned long orig_flags;
2622 int err = 0;
2623
2624 orig_flags = ofdpa_port->brport_flags;
2625 ofdpa_port->brport_flags = brport_flags;
2626 if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
2627 !switchdev_trans_ph_prepare(trans))
2628 err = rocker_port_set_learning(ofdpa_port->rocker_port,
2629 !!(ofdpa_port->brport_flags & BR_LEARNING));
2630
2631 if (switchdev_trans_ph_prepare(trans))
2632 ofdpa_port->brport_flags = orig_flags;
2633
2634 return err;
2635}
2636
2637static int
2638ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
2639 unsigned long *p_brport_flags)
2640{
2641 const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2642
2643 *p_brport_flags = ofdpa_port->brport_flags;
2644 return 0;
2645}
2646
2647static int
2648ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2649 u32 ageing_time,
2650 struct switchdev_trans *trans)
2651{
2652 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
3a8befcd 2653 struct ofdpa *ofdpa = ofdpa_port->ofdpa;
3fbcdbf3
JP
2654
2655 if (!switchdev_trans_ph_prepare(trans)) {
2656 ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
3a8befcd
JP
2657 if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2658 ofdpa->ageing_time = ofdpa_port->ageing_time;
3fbcdbf3
JP
2659 mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2660 }
2661
2662 return 0;
2663}
2664
2665static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2666 const struct switchdev_obj_port_vlan *vlan,
2667 struct switchdev_trans *trans)
2668{
2669 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2670 u16 vid;
2671 int err;
2672
2673 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2674 err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
2675 if (err)
2676 return err;
2677 }
2678
2679 return 0;
2680}
2681
2682static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2683 const struct switchdev_obj_port_vlan *vlan)
2684{
2685 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2686 u16 vid;
2687 int err;
2688
2689 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2690 err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2691 if (err)
2692 return err;
2693 }
2694
2695 return 0;
2696}
2697
2698static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
2699 struct switchdev_obj_port_vlan *vlan,
2700 switchdev_obj_dump_cb_t *cb)
2701{
2702 const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2703 u16 vid;
2704 int err = 0;
2705
2706 for (vid = 1; vid < VLAN_N_VID; vid++) {
2707 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2708 continue;
2709 vlan->flags = 0;
2710 if (ofdpa_vlan_id_is_internal(htons(vid)))
2711 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
2712 vlan->vid_begin = vlan->vid_end = vid;
2713 err = cb(&vlan->obj);
2714 if (err)
2715 break;
2716 }
2717
2718 return err;
2719}
2720
2721static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
2722 const struct switchdev_obj_ipv4_fib *fib4,
2723 struct switchdev_trans *trans)
2724{
2725 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2726
2727 return ofdpa_port_fib_ipv4(ofdpa_port, trans,
2728 htonl(fib4->dst), fib4->dst_len,
da4ed551 2729 fib4->fi, fib4->tb_id, 0);
3fbcdbf3
JP
2730}
2731
2732static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
2733 const struct switchdev_obj_ipv4_fib *fib4)
2734{
2735 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2736
2737 return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
2738 htonl(fib4->dst), fib4->dst_len,
da4ed551 2739 fib4->fi, fib4->tb_id,
3fbcdbf3
JP
2740 OFDPA_OP_FLAG_REMOVE);
2741}
2742
2743static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2744 const struct switchdev_obj_port_fdb *fdb,
2745 struct switchdev_trans *trans)
2746{
2747 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2748 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2749
2750 if (!ofdpa_port_is_bridged(ofdpa_port))
2751 return -EINVAL;
2752
2753 return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
2754}
2755
2756static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2757 const struct switchdev_obj_port_fdb *fdb)
2758{
2759 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2760 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
2761 int flags = OFDPA_OP_FLAG_REMOVE;
2762
2763 if (!ofdpa_port_is_bridged(ofdpa_port))
2764 return -EINVAL;
2765
2766 return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
2767}
2768
2769static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
2770 struct switchdev_obj_port_fdb *fdb,
2771 switchdev_obj_dump_cb_t *cb)
2772{
2773 const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2774 struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2775 struct ofdpa_fdb_tbl_entry *found;
2776 struct hlist_node *tmp;
2777 unsigned long lock_flags;
2778 int bkt;
2779 int err = 0;
2780
2781 spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
2782 hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
2783 if (found->key.ofdpa_port != ofdpa_port)
2784 continue;
2785 ether_addr_copy(fdb->addr, found->key.addr);
2786 fdb->ndm_state = NUD_REACHABLE;
2787 fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
2788 found->key.vlan_id);
2789 err = cb(&fdb->obj);
2790 if (err)
2791 break;
2792 }
2793 spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
2794
2795 return err;
2796}
2797
/* Join @bridge: the port's untagged traffic moves to the bridge's
 * internal VLAN.  The untagged VLAN must be removed before the
 * internal VLAN id is swapped, and re-added afterwards.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* drop the port's own internal VLAN, take the bridge's */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
}
2822
/* Leave the bridge: the reverse of ofdpa_port_bridge_join() — the
 * untagged VLAN is re-homed from the bridge's internal VLAN back to
 * the port's own, and forwarding is re-enabled if the port is up.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* drop the bridge's internal VLAN, take back the port's own */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* now standalone: re-enable forwarding if the netdev is up */
	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2848
2849static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2850 struct net_device *master)
2851{
2852 int err;
2853
2854 ofdpa_port->bridge_dev = master;
2855
2856 err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2857 if (err)
2858 return err;
2859 err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2860
2861 return err;
2862}
2863
2864static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2865 struct net_device *master)
2866{
2867 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2868 int err = 0;
2869
2870 if (netif_is_bridge_master(master))
2871 err = ofdpa_port_bridge_join(ofdpa_port, master);
2872 else if (netif_is_ovs_master(master))
2873 err = ofdpa_port_ovs_changed(ofdpa_port, master);
2874 return err;
2875}
2876
2877static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2878 struct net_device *master)
2879{
2880 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2881 int err = 0;
2882
2883 if (ofdpa_port_is_bridged(ofdpa_port))
2884 err = ofdpa_port_bridge_leave(ofdpa_port);
2885 else if (ofdpa_port_is_ovsed(ofdpa_port))
2886 err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2887 return err;
2888}
2889
2890static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2891 struct neighbour *n)
2892{
2893 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2894 int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2895 OFDPA_OP_FLAG_NOWAIT;
2896 __be32 ip_addr = *(__be32 *) n->primary_key;
2897
2898 return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2899}
2900
2901static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2902 struct neighbour *n)
2903{
2904 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2905 int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2906 __be32 ip_addr = *(__be32 *) n->primary_key;
2907
2908 return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2909}
2910
2911static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2912 const unsigned char *addr,
2913 __be16 vlan_id)
2914{
2915 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2916 int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2917
2918 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2919 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2920 return 0;
2921
2922 return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
2923}
2924
/* OF-DPA world ops: glue between the rocker core and the OF-DPA-like
 * pipeline implementation above.  The core calls these for device,
 * port and switchdev lifecycle events.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
	.port_obj_fib4_add = ofdpa_port_obj_fib4_add,
	.port_obj_fib4_del = ofdpa_port_obj_fib4_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
};
This page took 0.228647 seconds and 5 git commands to generate.