/*
 * Copyright (c) 2007-2011 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;
static int check_header(struct sk_buff *skb, int len)
{
        if (unlikely(skb->len < len))
                return -EINVAL;
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;
        return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_network_offset(skb) +
                                  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int ip_len;
        int err;

        err = check_header(skb, nh_ofs + sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        ip_len = ip_hdrlen(skb);
        if (unlikely(ip_len < sizeof(struct iphdr) ||
                     skb->len < nh_ofs + ip_len))
                return -EINVAL;

        skb_set_transport_header(skb, nh_ofs + ip_len);
        return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        int tcp_len;

        if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
                return false;

        tcp_len = tcp_hdrlen(skb);
        if (unlikely(tcp_len < sizeof(struct tcphdr) ||
                     skb->len < th_ofs + tcp_len))
                return false;

        return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmphdr));
}
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
        struct timespec cur_ts;
        u64 cur_ms, idle_ms;

        ktime_get_ts(&cur_ts);
        idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
        cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
                 cur_ts.tv_nsec / NSEC_PER_MSEC;

        return cur_ms - idle_ms;
}
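
/*
 * Worked example (a rough sketch, assuming HZ=1000): if the monotonic clock
 * currently reads 100000 ms and the flow was last touched 250 jiffies
 * (250 ms) ago, ovs_flow_used_time() returns 100000 - 250 = 99750, i.e. the
 * monotonic-clock time in milliseconds at which the flow was last used.
 */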
#define SW_FLOW_KEY_OFFSET(field)               \
        (offsetof(struct sw_flow_key, field) +  \
         FIELD_SIZEOF(struct sw_flow_key, field))
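
/*
 * Sketch of how SW_FLOW_KEY_OFFSET() is used (assuming the sw_flow_key
 * layout from flow.h): it evaluates to the offset of 'field' plus its size,
 * i.e. the number of leading bytes of the key that are meaningful once
 * 'field' has been filled in.  For instance,
 *
 *      key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
 *
 * makes the later hashing and comparison (ovs_flow_hash() and the memcmp()
 * in ovs_flow_tbl_lookup()) cover only the populated prefix of the key.
 */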
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
                         int *key_lenp)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int nh_len;
        int payload_ofs;
        struct ipv6hdr *nh;
        uint8_t nexthdr;
        __be16 frag_off;
        int err;

        *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

        err = check_header(skb, nh_ofs + sizeof(*nh));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        nexthdr = nh->nexthdr;
        payload_ofs = (u8 *)(nh + 1) - skb->data;

        key->ip.proto = NEXTHDR_NONE;
        key->ip.tos = ipv6_get_dsfield(nh);
        key->ip.ttl = nh->hop_limit;
        key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        key->ipv6.addr.src = nh->saddr;
        key->ipv6.addr.dst = nh->daddr;

        payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
        if (unlikely(payload_ofs < 0))
                return -EINVAL;

        if (frag_off) {
                if (frag_off & htons(~0x7))
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                else
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;
        }

        nh_len = payload_ofs - nh_ofs;
        skb_set_transport_header(skb, nh_ofs + nh_len);
        key->ip.proto = nexthdr;
        return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmp6hdr));
}
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
        u8 tcp_flags = 0;

        if ((flow->key.eth.type == htons(ETH_P_IP) ||
             flow->key.eth.type == htons(ETH_P_IPV6)) &&
            flow->key.ip.proto == IPPROTO_TCP &&
            likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
                u8 *tcp = (u8 *)tcp_hdr(skb);
                tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
        }

        spin_lock(&flow->lock);
        flow->used = jiffies;
        flow->packet_count++;
        flow->byte_count += skb->len;
        flow->tcp_flags |= tcp_flags;
        spin_unlock(&flow->lock);
}
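
/*
 * A minimal sketch of how a reader would take a consistent snapshot of the
 * statistics that ovs_flow_used() maintains (the local variables 'used',
 * 'packets' and 'bytes' are assumed to exist in the caller); readers take
 * the same per-flow spinlock:
 *
 *      spin_lock_bh(&flow->lock);
 *      used = flow->used;
 *      packets = flow->packet_count;
 *      bytes = flow->byte_count;
 *      spin_unlock_bh(&flow->lock);
 */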
struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
        int actions_len = nla_len(actions);
        struct sw_flow_actions *sfa;

        /* At least DP_MAX_PORTS actions are required to be able to flood a
         * packet to every port.  Factor of 2 allows for setting VLAN tags,
         * etc. */
        if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
                return ERR_PTR(-EINVAL);

        sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
        if (!sfa)
                return ERR_PTR(-ENOMEM);

        sfa->actions_len = actions_len;
        memcpy(sfa->actions, nla_data(actions), actions_len);
        return sfa;
}

struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&flow->lock);
        flow->sf_acts = NULL;

        return flow;
}
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
        hash = jhash_1word(hash, table->hash_seed);
        return flex_array_get(table->buckets,
                        (hash & (table->n_buckets - 1)));
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head *),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
        struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->buckets = alloc_buckets(new_size);

        if (!table->buckets) {
                kfree(table);
                return NULL;
        }
        table->n_buckets = new_size;
        table->count = 0;
        table->node_ver = 0;
        table->keep_flows = false;
        get_random_bytes(&table->hash_seed, sizeof(u32));

        return table;
}
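
/*
 * Usage sketch (TBL_MIN_BUCKETS is assumed to come from flow.h): the
 * datapath would create its initial table with a power-of-two bucket count,
 * e.g.
 *
 *      struct flow_table *table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
 *
 * The size must stay a power of two because find_bucket() selects a bucket
 * with "hash & (table->n_buckets - 1)" rather than a modulo.
 */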
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        int i;

        if (!table)
                return;

        if (table->keep_flows)
                goto skip_flows;

        for (i = 0; i < table->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(table->buckets, i);
                struct hlist_node *node, *n;
                int ver = table->node_ver;

                hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow);
                }
        }

skip_flows:
        free_buckets(table->buckets);
        kfree(table);
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct flow_table *table = container_of(rcu, struct flow_table, rcu);

        ovs_flow_tbl_destroy(table);
}

void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
        if (!table)
                return;

        call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}

struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        struct hlist_node *n;
        int ver;
        int i;

        ver = table->node_ver;
        while (*bucket < table->n_buckets) {
                i = 0;
                head = flex_array_get(table->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;
                struct hlist_node *n;

                head = flex_array_get(old->buckets, i);

                hlist_for_each_entry(flow, n, head, hash_node[old_ver])
                        ovs_flow_tbl_insert(new, flow);
        }
        old->keep_flows = true;
}

static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
        struct flow_table *new_table;

        new_table = ovs_flow_tbl_alloc(n_buckets);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        flow_table_copy_flows(table, new_table);

        return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets * 2);
}
void ovs_flow_free(struct sw_flow *flow)
{
        if (unlikely(!flow))
                return;

        kfree((struct sf_flow_acts __force *)flow->sf_acts);
        kmem_cache_free(flow_cache, flow);
}

/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        ovs_flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
{
        call_rcu(&flow->rcu, rcu_free_flow_callback);
}

/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
        struct sw_flow_actions *sf_acts = container_of(rcu,
                        struct sw_flow_actions, rcu);
        kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
        call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        struct qtag_prefix {
                __be16 eth_type; /* ETH_P_8021Q */
                __be16 tci;
        };
        struct qtag_prefix *qp;

        if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
                return 0;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
                                         sizeof(__be16))))
                return -ENOMEM;

        qp = (struct qtag_prefix *) skb->data;
        key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
        __skb_pull(skb, sizeof(struct qtag_prefix));

        return 0;
}

static __be16 parse_ethertype(struct sk_buff *skb)
{
        struct llc_snap_hdr {
                u8  dsap;  /* Always 0xAA */
                u8  ssap;  /* Always 0xAA */
                u8  ctrl;
                u8  oui[3];
                __be16 ethertype;
        };
        struct llc_snap_hdr *llc;
        __be16 proto;

        proto = *(__be16 *) skb->data;
        __skb_pull(skb, sizeof(__be16));

        if (ntohs(proto) >= 1536)
                return proto;

        if (skb->len < sizeof(struct llc_snap_hdr))
                return htons(ETH_P_802_2);

        if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
                return htons(0);

        llc = (struct llc_snap_hdr *) skb->data;
        if (llc->dsap != LLC_SAP_SNAP ||
            llc->ssap != LLC_SAP_SNAP ||
            (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
                return htons(ETH_P_802_2);

        __skb_pull(skb, sizeof(struct llc_snap_hdr));
        return llc->ethertype;
}
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                        int *key_lenp, int nh_len)
{
        struct icmp6hdr *icmp = icmp6_hdr(skb);
        int error = 0;
        int key_len;

        /* The ICMPv6 type and code fields use the 16-bit transport port
         * fields, so we need to store them in 16-bit network byte order.
         */
        key->ipv6.tp.src = htons(icmp->icmp6_type);
        key->ipv6.tp.dst = htons(icmp->icmp6_code);
        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

        if (icmp->icmp6_code == 0 &&
            (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
             icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                int icmp_len = skb->len - skb_transport_offset(skb);
                struct nd_msg *nd;
                int offset;

                key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

                /* In order to process neighbor discovery options, we need the
                 * entire packet.
                 */
                if (unlikely(icmp_len < sizeof(*nd)))
                        goto out;
                if (unlikely(skb_linearize(skb))) {
                        error = -ENOMEM;
                        goto out;
                }

                nd = (struct nd_msg *)skb_transport_header(skb);
                key->ipv6.nd.target = nd->target;
                key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

                icmp_len -= sizeof(*nd);
                offset = 0;
                while (icmp_len >= 8) {
                        struct nd_opt_hdr *nd_opt =
                                 (struct nd_opt_hdr *)(nd->opt + offset);
                        int opt_len = nd_opt->nd_opt_len * 8;

                        if (unlikely(!opt_len || opt_len > icmp_len))
                                goto invalid;

                        /* Store the link layer address if the appropriate
                         * option is provided.  It is considered an error if
                         * the same link layer option is specified twice.
                         */
                        if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
                            && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.sll,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                                   && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.tll,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        }

                        icmp_len -= opt_len;
                        offset += opt_len;
                }
        }

        goto out;

invalid:
        memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
        memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
        memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
        *key_lenp = key_len;
        return error;
}
/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
                 int *key_lenp)
{
        int error = 0;
        struct ethhdr *eth;
        int key_len = SW_FLOW_KEY_OFFSET(eth);

        memset(key, 0, sizeof(*key));

        key->phy.priority = skb->priority;
        key->phy.in_port = in_port;

        skb_reset_mac_header(skb);

        /* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
         * header in the linear data area.
         */
        eth = eth_hdr(skb);
        memcpy(key->eth.src, eth->h_source, ETH_ALEN);
        memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

        __skb_pull(skb, 2 * ETH_ALEN);

        if (vlan_tx_tag_present(skb))
                key->eth.tci = htons(skb->vlan_tci);
        else if (eth->h_proto == htons(ETH_P_8021Q))
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;

        key->eth.type = parse_ethertype(skb);
        if (unlikely(key->eth.type == htons(0)))
                return -ENOMEM;

        skb_reset_network_header(skb);
        __skb_push(skb, skb->data - skb_mac_header(skb));

        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
                struct iphdr *nh;
                __be16 offset;

                key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

                error = check_iphdr(skb);
                if (unlikely(error)) {
                        if (error == -EINVAL) {
                                skb->transport_header = skb->network_header;
                                error = 0;
                        }
                        goto out;
                }

                nh = ip_hdr(skb);
                key->ipv4.addr.src = nh->saddr;
                key->ipv4.addr.dst = nh->daddr;

                key->ip.proto = nh->protocol;
                key->ip.tos = nh->tos;
                key->ip.ttl = nh->ttl;

                offset = nh->frag_off & htons(IP_OFFSET);
                if (offset) {
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                        goto out;
                }
                if (nh->frag_off & htons(IP_MF) ||
                         skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;

                /* Transport layer. */
                if (key->ip.proto == IPPROTO_TCP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv4.tp.src = tcp->source;
                                key->ipv4.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_UDP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv4.tp.src = udp->source;
                                key->ipv4.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_ICMP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (icmphdr_ok(skb)) {
                                struct icmphdr *icmp = icmp_hdr(skb);
                                /* The ICMP type and code fields use the 16-bit
                                 * transport port fields, so we need to store
                                 * them in 16-bit network byte order. */
                                key->ipv4.tp.src = htons(icmp->type);
                                key->ipv4.tp.dst = htons(icmp->code);
                        }
                }

        } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
                struct arp_eth_header *arp;

                arp = (struct arp_eth_header *)skb_network_header(skb);

                if (arp->ar_hrd == htons(ARPHRD_ETHER)
                                && arp->ar_pro == htons(ETH_P_IP)
                                && arp->ar_hln == ETH_ALEN
                                && arp->ar_pln == 4) {

                        /* We only match on the lower 8 bits of the opcode. */
                        if (ntohs(arp->ar_op) <= 0xff)
                                key->ip.proto = ntohs(arp->ar_op);

                        if (key->ip.proto == ARPOP_REQUEST
                                        || key->ip.proto == ARPOP_REPLY) {
                                memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
                                memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
                                memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
                                memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
                                key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
                        }
                }
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */

                nh_len = parse_ipv6hdr(skb, key, &key_len);
                if (unlikely(nh_len < 0)) {
                        if (nh_len == -EINVAL)
                                skb->transport_header = skb->network_header;
                        else
                                error = nh_len;
                        goto out;
                }

                if (key->ip.frag == OVS_FRAG_TYPE_LATER)
                        goto out;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;

                /* Transport layer. */
                if (key->ip.proto == NEXTHDR_TCP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv6.tp.src = tcp->source;
                                key->ipv6.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_UDP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv6.tp.src = udp->source;
                                key->ipv6.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_ICMP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (icmp6hdr_ok(skb)) {
                                error = parse_icmpv6(skb, key, &key_len, nh_len);
                                if (error < 0)
                                        goto out;
                        }
                }
        }

out:
        *key_lenp = key_len;
        return error;
}
u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
        return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
                                struct sw_flow_key *key, int key_len)
{
        struct sw_flow *flow;
        struct hlist_node *n;
        struct hlist_head *head;
        u32 hash;

        hash = ovs_flow_hash(key, key_len);

        head = find_bucket(table, hash);
        hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {

                if (flow->hash == hash &&
                    !memcmp(&flow->key, key, key_len)) {
                        return flow;
                }
        }
        return NULL;
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(table, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
        table->count++;
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        hlist_del_rcu(&flow->hash_node[table->node_ver]);
        table->count--;
        BUG_ON(table->count < 0);
}
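
/*
 * A rough usage sketch of the extract/hash/lookup/insert helpers above
 * (illustrative only; 'table', 'in_port' and 'error' are assumed to exist
 * in the caller, and error handling is abbreviated):
 *
 *      struct sw_flow_key key;
 *      struct sw_flow *flow;
 *      int key_len;
 *
 *      error = ovs_flow_extract(skb, in_port, &key, &key_len);
 *      flow = ovs_flow_tbl_lookup(table, &key, key_len);
 *      if (!flow) {
 *              flow = ovs_flow_alloc();
 *              flow->key = key;
 *              flow->hash = ovs_flow_hash(&key, key_len);
 *              ovs_flow_tbl_insert(table, flow);
 *      }
 *
 * ovs_flow_tbl_insert() files the flow under flow->hash, so the hash must
 * be filled in before insertion.
 */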
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_ENCAP] = -1,
        [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
        [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
        [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
        [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
        [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
        [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
        [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
        [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
        [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
        [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
        [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
        [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
        [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
                                  const struct nlattr *a[], u32 *attrs)
{
        const struct ovs_key_icmp *icmp_key;
        const struct ovs_key_tcp *tcp_key;
        const struct ovs_key_udp *udp_key;

        switch (swkey->ip.proto) {
        case IPPROTO_TCP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_TCP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
                swkey->ipv4.tp.src = tcp_key->tcp_src;
                swkey->ipv4.tp.dst = tcp_key->tcp_dst;
                break;

        case IPPROTO_UDP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_UDP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
                swkey->ipv4.tp.src = udp_key->udp_src;
                swkey->ipv4.tp.dst = udp_key->udp_dst;
                break;

        case IPPROTO_ICMP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
                swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
                swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
                break;
        }

        return 0;
}
static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
                                  const struct nlattr *a[], u32 *attrs)
{
        const struct ovs_key_icmpv6 *icmpv6_key;
        const struct ovs_key_tcp *tcp_key;
        const struct ovs_key_udp *udp_key;

        switch (swkey->ip.proto) {
        case IPPROTO_TCP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_TCP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
                swkey->ipv6.tp.src = tcp_key->tcp_src;
                swkey->ipv6.tp.dst = tcp_key->tcp_dst;
                break;

        case IPPROTO_UDP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_UDP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
                swkey->ipv6.tp.src = udp_key->udp_src;
                swkey->ipv6.tp.dst = udp_key->udp_dst;
                break;

        case IPPROTO_ICMPV6:
                if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

                *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
                swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
                swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

                if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
                    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                        const struct ovs_key_nd *nd_key;

                        if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
                                return -EINVAL;
                        *attrs &= ~(1 << OVS_KEY_ATTR_ND);

                        *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
                        nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
                        memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
                               sizeof(swkey->ipv6.nd.target));
                        memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
                        memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
                }
                break;
        }

        return 0;
}
static int parse_flow_nlattrs(const struct nlattr *attr,
                              const struct nlattr *a[], u32 *attrsp)
{
        const struct nlattr *nla;
        u32 attrs;
        int rem;

        attrs = 0;
        nla_for_each_nested(nla, attr, rem) {
                u16 type = nla_type(nla);
                int expected_len;

                if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
                        return -EINVAL;

                expected_len = ovs_key_lens[type];
                if (nla_len(nla) != expected_len && expected_len != -1)
                        return -EINVAL;

                attrs |= 1 << type;
                a[type] = nla;
        }
        if (rem)
                return -EINVAL;

        *attrsp = attrs;
        return 0;
}
/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                      const struct nlattr *attr)
{
        const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
        const struct ovs_key_ethernet *eth_key;
        int key_len;
        u32 attrs;
        int err;

        memset(swkey, 0, sizeof(struct sw_flow_key));
        key_len = SW_FLOW_KEY_OFFSET(eth);

        err = parse_flow_nlattrs(attr, a, &attrs);
        if (err)
                return err;

        /* Metadata attributes. */
        if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
                swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
                attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
        }
        if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
                u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
                if (in_port >= DP_MAX_PORTS)
                        return -EINVAL;
                swkey->phy.in_port = in_port;
                attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
        } else {
                swkey->phy.in_port = USHRT_MAX;
        }

        /* Data attributes. */
        if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
                return -EINVAL;
        attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

        eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
        memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
        memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

        if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
            nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
                const struct nlattr *encap;
                __be16 tci;

                if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
                              (1 << OVS_KEY_ATTR_ETHERTYPE) |
                              (1 << OVS_KEY_ATTR_ENCAP)))
                        return -EINVAL;

                encap = a[OVS_KEY_ATTR_ENCAP];
                tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
                if (tci & htons(VLAN_TAG_PRESENT)) {
                        swkey->eth.tci = tci;

                        err = parse_flow_nlattrs(encap, a, &attrs);
                        if (err)
                                return err;
                } else if (!tci) {
                        /* Corner case for truncated 802.1Q header. */
                        if (nla_len(encap))
                                return -EINVAL;

                        swkey->eth.type = htons(ETH_P_8021Q);
                        *key_lenp = key_len;
                        return 0;
                } else {
                        return -EINVAL;
                }
        }

        if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
                swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
                if (ntohs(swkey->eth.type) < 1536)
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
        } else {
                swkey->eth.type = htons(ETH_P_802_2);
        }

        if (swkey->eth.type == htons(ETH_P_IP)) {
                const struct ovs_key_ipv4 *ipv4_key;

                if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

                key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
                ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
                if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
                        return -EINVAL;
                swkey->ip.proto = ipv4_key->ipv4_proto;
                swkey->ip.tos = ipv4_key->ipv4_tos;
                swkey->ip.ttl = ipv4_key->ipv4_ttl;
                swkey->ip.frag = ipv4_key->ipv4_frag;
                swkey->ipv4.addr.src = ipv4_key->ipv4_src;
                swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

                if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
                        err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
                        if (err)
                                return err;
                }
        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                const struct ovs_key_ipv6 *ipv6_key;

                if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

                key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
                ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
                if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
                        return -EINVAL;
                swkey->ipv6.label = ipv6_key->ipv6_label;
                swkey->ip.proto = ipv6_key->ipv6_proto;
                swkey->ip.tos = ipv6_key->ipv6_tclass;
                swkey->ip.ttl = ipv6_key->ipv6_hlimit;
                swkey->ip.frag = ipv6_key->ipv6_frag;
                memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
                       sizeof(swkey->ipv6.addr.src));
                memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
                       sizeof(swkey->ipv6.addr.dst));

                if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
                        err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
                        if (err)
                                return err;
                }
        } else if (swkey->eth.type == htons(ETH_P_ARP)) {
                const struct ovs_key_arp *arp_key;

                if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_ARP);

                key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
                arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
                swkey->ipv4.addr.src = arp_key->arp_sip;
                swkey->ipv4.addr.dst = arp_key->arp_tip;
                if (arp_key->arp_op & htons(0xff00))
                        return -EINVAL;
                swkey->ip.proto = ntohs(arp_key->arp_op);
                memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
                memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
        }

        if (attrs)
                return -EINVAL;
        *key_lenp = key_len;

        return 0;
}
/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @in_port: receives the extracted input port.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
                               const struct nlattr *attr)
{
        const struct nlattr *nla;
        int rem;

        *in_port = USHRT_MAX;
        *priority = 0;

        nla_for_each_nested(nla, attr, rem) {
                int type = nla_type(nla);

                if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
                        if (nla_len(nla) != ovs_key_lens[type])
                                return -EINVAL;

                        switch (type) {
                        case OVS_KEY_ATTR_PRIORITY:
                                *priority = nla_get_u32(nla);
                                break;

                        case OVS_KEY_ATTR_IN_PORT:
                                if (nla_get_u32(nla) >= DP_MAX_PORTS)
                                        return -EINVAL;
                                *in_port = nla_get_u32(nla);
                                break;
                        }
                }
        }
        if (rem)
                return -EINVAL;

        return 0;
}
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
        struct ovs_key_ethernet *eth_key;
        struct nlattr *nla, *encap;

        if (swkey->phy.priority &&
            nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
                goto nla_put_failure;

        if (swkey->phy.in_port != USHRT_MAX &&
            nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
                goto nla_put_failure;

        nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
        if (!nla)
                goto nla_put_failure;
        eth_key = nla_data(nla);
        memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
        memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

        if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
                if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
                    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
                        goto nla_put_failure;
                encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
                if (!swkey->eth.tci)
                        goto unencap;
        } else {
                encap = NULL;
        }

        if (swkey->eth.type == htons(ETH_P_802_2))
                goto unencap;

        if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
                goto nla_put_failure;

        if (swkey->eth.type == htons(ETH_P_IP)) {
                struct ovs_key_ipv4 *ipv4_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
                if (!nla)
                        goto nla_put_failure;
                ipv4_key = nla_data(nla);
                ipv4_key->ipv4_src = swkey->ipv4.addr.src;
                ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
                ipv4_key->ipv4_proto = swkey->ip.proto;
                ipv4_key->ipv4_tos = swkey->ip.tos;
                ipv4_key->ipv4_ttl = swkey->ip.ttl;
                ipv4_key->ipv4_frag = swkey->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                struct ovs_key_ipv6 *ipv6_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
                if (!nla)
                        goto nla_put_failure;
                ipv6_key = nla_data(nla);
                memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
                                sizeof(ipv6_key->ipv6_src));
                memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
                                sizeof(ipv6_key->ipv6_dst));
                ipv6_key->ipv6_label = swkey->ipv6.label;
                ipv6_key->ipv6_proto = swkey->ip.proto;
                ipv6_key->ipv6_tclass = swkey->ip.tos;
                ipv6_key->ipv6_hlimit = swkey->ip.ttl;
                ipv6_key->ipv6_frag = swkey->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_ARP)) {
                struct ovs_key_arp *arp_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
                if (!nla)
                        goto nla_put_failure;
                arp_key = nla_data(nla);
                memset(arp_key, 0, sizeof(struct ovs_key_arp));
                arp_key->arp_sip = swkey->ipv4.addr.src;
                arp_key->arp_tip = swkey->ipv4.addr.dst;
                arp_key->arp_op = htons(swkey->ip.proto);
                memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
                memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
        }

        if ((swkey->eth.type == htons(ETH_P_IP) ||
             swkey->eth.type == htons(ETH_P_IPV6)) &&
             swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

                if (swkey->ip.proto == IPPROTO_TCP) {
                        struct ovs_key_tcp *tcp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
                        if (!nla)
                                goto nla_put_failure;
                        tcp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                tcp_key->tcp_src = swkey->ipv4.tp.src;
                                tcp_key->tcp_dst = swkey->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                tcp_key->tcp_src = swkey->ipv6.tp.src;
                                tcp_key->tcp_dst = swkey->ipv6.tp.dst;
                        }
                } else if (swkey->ip.proto == IPPROTO_UDP) {
                        struct ovs_key_udp *udp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
                        if (!nla)
                                goto nla_put_failure;
                        udp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                udp_key->udp_src = swkey->ipv4.tp.src;
                                udp_key->udp_dst = swkey->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                udp_key->udp_src = swkey->ipv6.tp.src;
                                udp_key->udp_dst = swkey->ipv6.tp.dst;
                        }
                } else if (swkey->eth.type == htons(ETH_P_IP) &&
                           swkey->ip.proto == IPPROTO_ICMP) {
                        struct ovs_key_icmp *icmp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmp_key = nla_data(nla);
                        icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
                        icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
                } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
                           swkey->ip.proto == IPPROTO_ICMPV6) {
                        struct ovs_key_icmpv6 *icmpv6_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
                                                sizeof(*icmpv6_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmpv6_key = nla_data(nla);
                        icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
                        icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

                        if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
                            icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
                                struct ovs_key_nd *nd_key;

                                nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
                                if (!nla)
                                        goto nla_put_failure;
                                nd_key = nla_data(nla);
                                memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
                                                        sizeof(nd_key->nd_target));
                                memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
                                memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
                        }
                }
        }

unencap:
        if (encap)
                nla_nest_end(skb, encap);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
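
/*
 * A rough sketch of how a caller is expected to use ovs_flow_to_nlattrs()
 * when composing a Netlink reply (OVS_FLOW_ATTR_KEY is assumed to come from
 * the openvswitch uapi header; error handling is abbreviated):
 *
 *      struct nlattr *nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
 *      if (!nla)
 *              goto nla_put_failure;
 *      err = ovs_flow_to_nlattrs(&flow->key, skb);
 *      if (err)
 *              goto error;
 *      nla_nest_end(skb, nla);
 */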
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                        0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}