net/openvswitch/vport-gre.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
#include <net/xfrm.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/gre.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/protocol.h>

#include "datapath.h"
#include "vport.h"

/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)x;
#else
        return (__force __be32)((__force u64)x >> 32);
#endif
}

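/* Mask a set of tunnel flags down to the ones the GRE header can carry
 * (checksum and key).
 */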
static __be16 filter_tnl_flags(__be16 flags)
{
        return flags & (TUNNEL_CSUM | TUNNEL_KEY);
}

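/* Prepare checksum offload and push a GRE header built from the skb's
 * egress tunnel metadata. Returns NULL if offload handling fails; in
 * that case the skb has already been consumed by gre_handle_offloads().
 */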
static struct sk_buff *__build_header(struct sk_buff *skb,
                                      int tunnel_hlen)
{
        struct tnl_ptk_info tpi;
        const struct ovs_key_ipv4_tunnel *tun_key;

        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;

        skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
        if (IS_ERR(skb))
                return NULL;

        tpi.flags = filter_tnl_flags(tun_key->tun_flags);
        tpi.proto = htons(ETH_P_TEB);
        tpi.key = be64_get_low32(tun_key->tun_id);
        tpi.seq = 0;
        gre_build_header(skb, &tpi, tunnel_hlen);

        return skb;
}

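/* Recombine the 32-bit GRE key and sequence number into the 64-bit OVS
 * tunnel ID (the inverse of be64_get_low32() for the key half).
 */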
static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
{
#ifdef __BIG_ENDIAN
        return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
#else
        return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
#endif
}

/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb,
                   const struct tnl_ptk_info *tpi)
{
        struct ovs_tunnel_info tun_info;
        struct ovs_net *ovs_net;
        struct vport *vport;
        __be64 key;

        ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
        vport = rcu_dereference(ovs_net->vport_net.gre_vport);
        if (unlikely(!vport))
                return PACKET_REJECT;

        key = key_to_tunnel_id(tpi->key, tpi->seq);
        ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
                               filter_tnl_flags(tpi->flags), NULL, 0);

        ovs_vport_receive(vport, skb, &tun_info);
        return PACKET_RCVD;
}

/* Called with rcu_read_lock and BH disabled. */
static int gre_err(struct sk_buff *skb, u32 info,
                   const struct tnl_ptk_info *tpi)
{
        struct ovs_net *ovs_net;
        struct vport *vport;

        ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
        vport = rcu_dereference(ovs_net->vport_net.gre_vport);

        if (unlikely(!vport))
                return PACKET_REJECT;
        else
                return PACKET_RCVD;
}

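/* Transmit path: resolve a route for the egress tunnel destination, make
 * sure there is enough headroom, push any pending VLAN tag and the GRE
 * header, then hand the packet to iptunnel_xmit().
 */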
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
        struct net *net = ovs_dp_get_net(vport->dp);
        struct ovs_key_ipv4_tunnel *tun_key;
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
        int tunnel_hlen;
        __be16 df;
        int err;

        if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
                err = -EINVAL;
                goto error;
        }

        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
        /* Route lookup */
        memset(&fl, 0, sizeof(fl));
        fl.daddr = tun_key->ipv4_dst;
        fl.saddr = tun_key->ipv4_src;
        fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
        fl.flowi4_mark = skb->mark;
        fl.flowi4_proto = IPPROTO_GRE;

        rt = ip_route_output_key(net, &fl);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);

        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr)
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                       0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }

        if (vlan_tx_tag_present(skb)) {
                if (unlikely(!__vlan_put_tag(skb,
                                             skb->vlan_proto,
                                             vlan_tx_tag_get(skb)))) {
                        err = -ENOMEM;
                        goto err_free_rt;
                }
                skb->vlan_tci = 0;
        }

        /* Push Tunnel header. */
        skb = __build_header(skb, tunnel_hlen);
        if (unlikely(!skb)) {
                err = 0;
                goto err_free_rt;
        }

        df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
                htons(IP_DF) : 0;

        skb->ignore_df = 1;

        return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
                             tun_key->ipv4_dst, IPPROTO_GRE,
                             tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
err_free_rt:
        ip_rt_put(rt);
error:
        return err;
}

static struct gre_cisco_protocol gre_protocol = {
        .handler        = gre_rcv,
        .err_handler    = gre_err,
        .priority       = 1,
};

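/* Number of GRE vports across all network namespaces. The GRE protocol
 * handler is registered when the first vport is created and unregistered
 * when the last one is destroyed.
 */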
static int gre_ports;
static int gre_init(void)
{
        int err;

        gre_ports++;
        if (gre_ports > 1)
                return 0;

        err = gre_cisco_register(&gre_protocol);
        if (err)
                pr_warn("cannot register gre protocol handler\n");

        return err;
}

static void gre_exit(void)
{
        gre_ports--;
        if (gre_ports > 0)
                return;

        gre_cisco_unregister(&gre_protocol);
}

static const char *gre_get_name(const struct vport *vport)
{
        return vport_priv(vport);
}

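/* Create a GRE vport. Only one GRE vport may exist per network
 * namespace; a second create attempt fails with -EEXIST.
 */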
static struct vport *gre_create(const struct vport_parms *parms)
{
        struct net *net = ovs_dp_get_net(parms->dp);
        struct ovs_net *ovs_net;
        struct vport *vport;
        int err;

        err = gre_init();
        if (err)
                return ERR_PTR(err);

        ovs_net = net_generic(net, ovs_net_id);
        if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
                vport = ERR_PTR(-EEXIST);
                goto error;
        }

        vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
        if (IS_ERR(vport))
                goto error;

        strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
        rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
        return vport;

error:
        gre_exit();
        return vport;
}

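/* Tear down the GRE vport and drop the protocol handler reference taken
 * in gre_create().
 */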
static void gre_tnl_destroy(struct vport *vport)
{
        struct net *net = ovs_dp_get_net(vport->dp);
        struct ovs_net *ovs_net;

        ovs_net = net_generic(net, ovs_net_id);

        RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
        ovs_vport_deferred_free(vport);
        gre_exit();
}

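/* Vport operations exported to the OVS datapath for OVS_VPORT_TYPE_GRE
 * ports.
 */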
const struct vport_ops ovs_gre_vport_ops = {
        .type           = OVS_VPORT_TYPE_GRE,
        .create         = gre_create,
        .destroy        = gre_tnl_destroy,
        .get_name       = gre_get_name,
        .send           = gre_tnl_send,
};