/* net/8021q/vlan_core.c */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

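/* Receive-path hook for VLAN-tagged frames: find the VLAN device that
 * matches the tag carried in the skb, redirect the skb to it, clear the
 * tag state and update the per-CPU RX statistics. Returns false when no
 * matching VLAN device is configured or the skb could not be unshared.
 */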
bool vlan_do_receive(struct sk_buff **skbp)
{
        struct sk_buff *skb = *skbp;
        __be16 vlan_proto = skb->vlan_proto;
        u16 vlan_id = vlan_tx_tag_get_id(skb);
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;

        vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
        if (!vlan_dev)
                return false;

        skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return false;

        skb->dev = vlan_dev;
        if (skb->pkt_type == PACKET_OTHERHOST) {
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
                if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
        }

        if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                unsigned int offset = skb->data - skb_mac_header(skb);

                /*
                 * vlan_insert_tag() expects skb->data to point to the mac
                 * header, so move skb->data there before calling it and
                 * restore the original position afterwards.
                 */
                skb_push(skb, offset);
                skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
                                              skb->vlan_tci);
                if (!skb)
                        return false;
                skb_pull(skb, offset + VLAN_HLEN);
                skb_reset_mac_len(skb);
        }

        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;

        rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;
        if (skb->pkt_type == PACKET_MULTICAST)
                rx_stats->rx_multicast++;
        u64_stats_update_end(&rx_stats->syncp);

        return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev,
                                        __be16 vlan_proto, u16 vlan_id)
{
        struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

        if (vlan_info) {
                return vlan_group_get_device(&vlan_info->grp,
                                             vlan_proto, vlan_id);
        } else {
                /*
                 * Lower devices of master uppers (bonding, team) do not have
                 * a grp assigned to themselves; the grp is assigned to the
                 * upper device instead.
                 */
                struct net_device *upper_dev;

                upper_dev = netdev_master_upper_dev_get_rcu(dev);
                if (upper_dev)
                        return __vlan_find_dev_deep(upper_dev,
                                                    vlan_proto, vlan_id);
        }

        return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);

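/* Walk down a stack of VLAN devices and return the underlying non-VLAN
 * device at the bottom.
 */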
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
        struct net_device *ret = vlan_dev_priv(dev)->real_dev;

        while (is_vlan_dev(ret))
                ret = vlan_dev_priv(ret)->real_dev;

        return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

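/* Return the VLAN ID configured on a VLAN device. */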
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
        return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

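/* Move the Ethernet source and destination addresses up by VLAN_HLEN so
 * the frame looks as if it had arrived untagged; the buffer is un-cloned
 * first via skb_cow() when needed.
 */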
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
        if (skb_cow(skb, skb_headroom(skb)) < 0)
                return NULL;
        memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
        skb->mac_header += VLAN_HLEN;
        return skb;
}

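/* Software untagging: record the TCI from the packet data in the skb
 * (as if the NIC had stripped the tag in hardware), pull the 4-byte VLAN
 * header out of the payload and fix up the header offsets. Consumes the
 * skb and returns NULL on error.
 */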
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
        struct vlan_hdr *vhdr;
        u16 vlan_tci;

        if (unlikely(vlan_tx_tag_present(skb))) {
                /* vlan_tci is already set up, so leave this for another time */
                return skb;
        }

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                goto err_free;

        if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
                goto err_free;

        vhdr = (struct vlan_hdr *) skb->data;
        vlan_tci = ntohs(vhdr->h_vlan_TCI);
        __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

        skb_pull_rcsum(skb, VLAN_HLEN);
        vlan_set_encap_proto(skb, vhdr);

        skb = vlan_reorder_header(skb);
        if (unlikely(!skb))
                goto err_free;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb_reset_mac_len(skb);

        return skb;

err_free:
        kfree_skb(skb);
        return NULL;
}
EXPORT_SYMBOL(vlan_untag);

/*
 * vlan info and vid list
 */

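/* Free the per-protocol device pointer arrays attached to a vlan_group. */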
static void vlan_group_free(struct vlan_group *grp)
{
        int i, j;

        for (i = 0; i < VLAN_PROTO_NUM; i++)
                for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
                        kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
        vlan_group_free(&vlan_info->grp);
        kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
        vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
        struct vlan_info *vlan_info;

        vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
        if (!vlan_info)
                return NULL;

        vlan_info->real_dev = dev;
        INIT_LIST_HEAD(&vlan_info->vid_list);
        return vlan_info;
}

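/* A reference-counted (protocol, VLAN ID) entry kept on the real device's
 * vid_list; it tracks how many users have added that VID to the device.
 */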
struct vlan_vid_info {
        struct list_head list;
        __be16 proto;
        u16 vid;
        int refcount;
};

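/* True if @dev can filter this VID in hardware for the protocol in
 * question (CTAG filtering for 802.1Q, STAG filtering for 802.1ad).
 */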
static bool vlan_hw_filter_capable(const struct net_device *dev,
                                   const struct vlan_vid_info *vid_info)
{
        if (vid_info->proto == htons(ETH_P_8021Q) &&
            dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                return true;
        if (vid_info->proto == htons(ETH_P_8021AD) &&
            dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
                return true;
        return false;
}

5b9ea6e0 215static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
80d5c368 216 __be16 proto, u16 vid)
5b9ea6e0
JP
217{
218 struct vlan_vid_info *vid_info;
219
220 list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
80d5c368 221 if (vid_info->proto == proto && vid_info->vid == vid)
5b9ea6e0
JP
222 return vid_info;
223 }
224 return NULL;
225}
226
80d5c368 227static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
5b9ea6e0
JP
228{
229 struct vlan_vid_info *vid_info;
230
231 vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
232 if (!vid_info)
233 return NULL;
80d5c368 234 vid_info->proto = proto;
5b9ea6e0
JP
235 vid_info->vid = vid;
236
237 return vid_info;
238}
239
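/* Allocate a vid_info entry for (proto, vid), program the hardware VLAN
 * filter when the device supports it, and link the entry onto the vid list.
 */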
80d5c368 240static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
5b9ea6e0 241 struct vlan_vid_info **pvid_info)
87002b03 242{
5b9ea6e0 243 struct net_device *dev = vlan_info->real_dev;
87002b03 244 const struct net_device_ops *ops = dev->netdev_ops;
5b9ea6e0
JP
245 struct vlan_vid_info *vid_info;
246 int err;
247
80d5c368 248 vid_info = vlan_vid_info_alloc(proto, vid);
5b9ea6e0
JP
249 if (!vid_info)
250 return -ENOMEM;
87002b03 251
8ad227ff 252 if (vlan_hw_filter_capable(dev, vid_info)) {
80d5c368 253 err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
5b9ea6e0
JP
254 if (err) {
255 kfree(vid_info);
256 return err;
257 }
87002b03 258 }
5b9ea6e0
JP
259 list_add(&vid_info->list, &vlan_info->vid_list);
260 vlan_info->nr_vids++;
261 *pvid_info = vid_info;
87002b03
JP
262 return 0;
263}
5b9ea6e0 264
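/* Add a reference to (proto, vid) on @dev, allocating the vlan_info and
 * the vid_info entry on first use. Must be called with RTNL held.
 */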
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;
        bool vlan_info_created = false;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info) {
                vlan_info = vlan_info_alloc(dev);
                if (!vlan_info)
                        return -ENOMEM;
                vlan_info_created = true;
        }
        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info) {
                err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
                if (err)
                        goto out_free_vlan_info;
        }
        vid_info->refcount++;

        if (vlan_info_created)
                rcu_assign_pointer(dev->vlan_info, vlan_info);

        return 0;

out_free_vlan_info:
        if (vlan_info_created)
                kfree(vlan_info);
        return err;
}
EXPORT_SYMBOL(vlan_vid_add);

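/* Remove a vid_info entry from the vid list and drop the corresponding
 * hardware filter entry if the device programmed one.
 */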
static void __vlan_vid_del(struct vlan_info *vlan_info,
                           struct vlan_vid_info *vid_info)
{
        struct net_device *dev = vlan_info->real_dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        __be16 proto = vid_info->proto;
        u16 vid = vid_info->vid;
        int err;

        if (vlan_hw_filter_capable(dev, vid_info)) {
                err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
                if (err) {
                        pr_warn("failed to kill vid %04x/%d for device %s\n",
                                proto, vid, dev->name);
                }
        }
        list_del(&vid_info->list);
        kfree(vid_info);
        vlan_info->nr_vids--;
}

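/* Drop a reference to (proto, vid) on @dev. The last reference removes the
 * entry (and its hardware filter), and the vlan_info itself is freed via
 * RCU once no VIDs remain. Must be called with RTNL held.
 */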
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return;

        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info)
                return;
        vid_info->refcount--;
        if (vid_info->refcount == 0) {
                __vlan_vid_del(vlan_info, vid_info);
                if (vlan_info->nr_vids == 0) {
                        RCU_INIT_POINTER(dev->vlan_info, NULL);
                        call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
                }
        }
}
EXPORT_SYMBOL(vlan_vid_del);

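/* Copy every VID registered on @by_dev onto @dev, e.g. when the two
 * devices are stacked; on failure the VIDs added so far are removed again.
 */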
int vlan_vids_add_by_dev(struct net_device *dev,
                         const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return 0;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        list_for_each_entry_continue_reverse(vid_info,
                                             &vlan_info->vid_list,
                                             list) {
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
        }

        return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

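/* Remove from @dev every VID that is registered on @by_dev. */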
void vlan_vids_del_by_dev(struct net_device *dev,
                          const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list)
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

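/* True if at least one VLAN device is configured on top of @dev.
 * Must be called with RTNL held.
 */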
bool vlan_uses_dev(const struct net_device *dev)
{
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return false;
        return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);