/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman :	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov :	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#include "internal.h"

/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels should still reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly (PPP).
   - A packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
   If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

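/*
 * Illustrative sketch (not part of this file): the convention above is what
 * user space observes.  A SOCK_RAW packet socket yields frames starting at
 * the link-layer header, while SOCK_DGRAM yields the payload only, with the
 * ll header described via sockaddr_ll instead.  A minimal, hedged user-space
 * reader on Ethernet might look like:
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>	// ETH_P_ALL
 *	#include <arpa/inet.h>		// htons()
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	char buf[2048];
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *	// With SOCK_RAW on Ethernet, buf starts with the 14-byte Ethernet
 *	// header; with SOCK_DGRAM it would already be stripped and its
 *	// details reported through sll.
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &slen);
 */
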
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

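/*
 * Illustrative sketch (not part of this file): the block header fields the
 * macros above name are the same ones user space reads when consuming a
 * TPACKET_V3 ring.  Once block_status carries TP_STATUS_USER, frames inside
 * the mmap()ed block are chained via tp_next_offset, roughly:
 *
 *	struct tpacket_block_desc *pbd = ...;	// points into the mmap()ed ring
 *	struct tpacket3_hdr *ppd;
 *	unsigned int i;
 *
 *	ppd = (struct tpacket3_hdr *)((char *)pbd +
 *				      pbd->hdr.bh1.offset_to_first_pkt);
 *	for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
 *		// process the frame at ppd ...
 *		ppd = (struct tpacket3_hdr *)((char *)ppd +
 *					      ppd->tp_next_offset);
 *	}
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// hand block back
 */
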
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	u16 queue_map;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	features = netif_skb_features(skb);
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto drop;

	queue_map = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue_map);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq)) {
		ret = ops->ndo_start_xmit(skb, dev);
		if (ret == NETDEV_TX_OK)
			txq_trans_update(txq);
	}
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}
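
/*
 * Illustrative sketch (not part of this file): the tp_tstamp flags consulted
 * above are set from user space via the PACKET_TIMESTAMP socket option,
 * e.g. to request raw hardware timestamps in the ring:
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * When no requested flag matches, tpacket_get_timestamp() falls back to the
 * software timestamp in skb->tstamp, reported as TP_STATUS_TS_SOFTWARE.
 */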

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
			GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
			GET_PBDQC_FROM_RB(&po->rx_ring);
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
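
/*
 * Worked example for the computation above (an illustration, assuming a
 * 1 MB block on a 1 Gb/s link): mbits = (1048576 * 8) / (1024 * 1024) = 8,
 * div = 1000 / 1000 = 1 and msec = 1, so tmo = 8 * 1 = 8 and the function
 * returns 9 ms -- roughly the time needed to fill one block, plus slack.
 */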

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes the sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1 MB block-size, on a 1 Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10 ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	 because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			   struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available; user_space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	bool has_room;

	if (po->prot_hook.func != tpacket_rcv)
		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
			<= sk->sk_rcvbuf;

	spin_lock(&sk->sk_receive_queue.lock);
	if (po->tp_version == TPACKET_V3)
		has_room = prb_lookup_block(po, &po->rx_ring,
					po->rx_ring.prb_bdqc.kactive_blk_num,
					TP_STATUS_KERNEL);
	else
		has_room = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head,
					TP_STATUS_KERNEL);
	spin_unlock(&sk->sk_receive_queue.lock);

	return has_room;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(skb_get_hash(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return cur;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, unsigned int skip,
					  unsigned int num)
{
	unsigned int i, j;

	i = j = min_t(int, f->next[idx], num - 1);
	do {
		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
			if (i != j)
				f->next[idx] = i;
			return i;
		}
		if (++i == num)
			i = 0;
	} while (i != j);

	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
		break;
	}

	po = pkt_sk(f->arr[idx]);
	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
	    unlikely(!packet_rcv_has_room(po, skb))) {
		idx = fanout_demux_rollover(f, skb, idx, idx, num);
		po = pkt_sk(f->arr[idx]);
	}

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}
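
/*
 * Illustrative sketch (not part of this file): a fanout group is joined from
 * user space with the PACKET_FANOUT socket option; the group id lives in the
 * low 16 bits and the type/flags in the high 16 bits of the integer argument
 * that fanout_add() above receives as (id, type_flags):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);	// group id 42
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 *
 * Each socket that joins group 42 on the same protocol/device then receives
 * a hash-selected share of the traffic, per packet_rcv_fanout() above.
 */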

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have the ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

static unsigned int run_filter(const struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}
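
/*
 * Illustrative sketch (not part of this file): the sk_filter consulted above
 * is attached from user space as a classic BPF program via SO_ATTACH_FILTER.
 * For example, a trivial one-instruction filter that accepts every packet
 * but truncates it to 96 bytes (assumptions ours):
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },	// accept, snap to 96 bytes
 *	};
 *	struct sock_fprog prog = {
 *		.len = 1,
 *		.filter = code,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * run_filter() returns the BPF verdict, which the receive paths below use
 * as the snap length (0 means drop).
 */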
1741
1742/*
62ab0812
ED
1743 * This function makes lazy skb cloning in hope that most of packets
1744 * are discarded by BPF.
1745 *
1746 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1747 * and skb->cb are mangled. It works because (and until) packets
1748 * falling here are owned by current CPU. Output packets are cloned
1749 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1750 * sequencially, so that if we return skb to original state on exit,
1751 * we will not harm anyone.
1da177e4
LT
1752 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}
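
/* Illustrative userspace sketch (not part of this file): reading one frame
 * queued by packet_rcv() and the sockaddr_ll it filled in. Assumes "fd" is
 * a bound AF_PACKET/SOCK_RAW socket; requires CAP_NET_RAW.
 */
#if 0
#include <linux/if_packet.h>
#include <stdio.h>
#include <sys/socket.h>

static void read_one_frame(int fd)
{
	unsigned char buf[2048];
	struct sockaddr_ll sll;
	socklen_t slen = sizeof(sll);
	ssize_t n;

	n = recvfrom(fd, buf, sizeof(buf), 0,
		     (struct sockaddr *)&sll, &slen);
	if (n < 0)
		return;
	/* sll_ifindex/sll_pkttype/sll_addr were set in packet_rcv() above */
	printf("%zd bytes on ifindex %d, pkttype %u\n",
	       n, sll.sll_ifindex, sll.sll_pkttype);
}
#endif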

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union tpacket_uhdr h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timespec ts;
	__u32 ts_status;

	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them until the current aligned size without
	 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
	 */
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		macoff = netoff - maclen;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0)
				snaplen = 0;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto ring_is_full;
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported until you read the stats,
		 * because it's COR - Clear On Read.
		 * We only do this for V1/V2, as V3 doesn't need it
		 * at the packet level.
		 */
		if (po->stats.stats1.tp_drops)
			status |= TP_STATUS_LOSING;
	}
	po->stats.stats1.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		getnstimeofday(&ts);

	status |= ts_status;

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
			h.h2->tp_vlan_tpid = 0;
		}
		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_nxt_offset and the VLAN fields are already populated
		 * above, so don't clear them here.
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		u8 *start, *end;

		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
					macoff + snaplen);

		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
	smp_wmb();
#endif

	if (po->tp_version <= TPACKET_V2)
		__packet_set_status(po, h.raw, status);
	else
		prb_clear_blk_fill_status(&po->rx_ring);

	sk->sk_data_ready(sk);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.stats1.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
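
/* Illustrative userspace sketch (not part of this file): the mmap()ed
 * TPACKET_V2 RX ring that tpacket_rcv() fills. Sizes are example values
 * chosen so that tp_frame_nr == frames-per-block * tp_block_nr, as
 * packet_set_ring() below requires.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>

static unsigned char *setup_rx_ring(int fd, struct tpacket_req *req)
{
	int ver = TPACKET_V2;
	void *ring;

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
		return NULL;

	memset(req, 0, sizeof(*req));
	req->tp_block_size = 4096;	/* must be a multiple of PAGE_SIZE */
	req->tp_frame_size = 2048;	/* must be TPACKET_ALIGNMENT aligned */
	req->tp_block_nr   = 64;
	req->tp_frame_nr   = 2 * 64;	/* (block_size / frame_size) * block_nr */
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
		return NULL;

	ring = mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return ring == MAP_FAILED ? NULL : ring;
}

static void drain_ring(unsigned char *ring, const struct tpacket_req *req,
		       unsigned int *head)
{
	for (;;) {
		struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
			(ring + (size_t)*head * req->tp_frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER))
			break;			/* frame still owned by kernel */
		/* payload starts at hdr + tp_mac, tp_snaplen bytes long */
		hdr->tp_status = TP_STATUS_KERNEL;	/* hand frame back */
		*head = (*head + 1) % req->tp_frame_nr;
	}
}
#endif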

static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		void *ph;
		__u32 ts;

		ph = skb_shinfo(skb)->destructor_arg;
		packet_dec_pending(&po->tx_ring);

		ts = __packet_set_timestamp(po, ph, skb);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
	}

	sock_wfree(skb);
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr, int hlen)
{
	union tpacket_uhdr ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);

	if (!packet_use_direct_xmit(po))
		skb_probe_transport_header(skb, 0);
	if (unlikely(po->tp_tx_has_off)) {
		int off_min, off_max, off;
		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
		off_max = po->tx_ring.frame_size - tp_len;
		if (sock->type == SOCK_DGRAM) {
			switch (po->tp_version) {
			case TPACKET_V2:
				off = ph.h2->tp_net;
				break;
			default:
				off = ph.h1->tp_net;
				break;
			}
		} else {
			switch (po->tp_version) {
			case TPACKET_V2:
				off = ph.h2->tp_mac;
				break;
			default:
				off = ph.h1->tp_mac;
				break;
			}
		}
		if (unlikely((off < off_min) || (off_max < off)))
			return -EINVAL;
		data = ph.raw + off;
	} else {
		data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	}
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				      NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				     dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	int err, reserve = 0;
	void *ph;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = TP_STATUS_AVAILABLE;
	int hlen, tlen;

	mutex_lock(&po->pg_vec_lock);

	if (likely(saddr == NULL)) {
		dev	= packet_cached_dev_get(po);
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	reserve = dev->hard_header_len + VLAN_HLEN;
	size_max = po->tx_ring.frame_size
		   - (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);
		if (unlikely(ph == NULL)) {
			if (need_wait && need_resched())
				schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		hlen = LL_RESERVED_SPACE(dev);
		tlen = dev->needed_tailroom;
		skb = sock_alloc_send_skb(&po->sk,
				hlen + tlen + sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
					  addr, hlen);
		if (tp_len > dev->mtu + dev->hard_header_len) {
			struct ethhdr *ehdr;
			/* Earlier code assumed this would be a VLAN pkt,
			 * double-check this now that we have the actual
			 * packet in hand.
			 */

			skb_reset_mac_header(skb);
			ehdr = eth_hdr(skb);
			if (ehdr->h_proto != htons(ETH_P_8021Q))
				tp_len = -EMSGSIZE;
		}
		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		packet_pick_tx_queue(dev, skb);

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		packet_inc_pending(&po->tx_ring);

		status = TP_STATUS_SEND_REQUEST;
		err = po->xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
		/* Note: packet_read_pending() might be slow if we have
		 * to call it as it's per_cpu variable, but in fast-path
		 * we already short-circuit the loop with the first
		 * condition, and luckily don't have to go that path
		 * anyway.
		 */
		 (need_wait && packet_read_pending(&po->tx_ring))));

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
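
/* Illustrative userspace sketch (not part of this file): queueing one frame
 * on a TPACKET_V2 TX ring and kicking tpacket_snd() with a zero-length
 * send(). Assumes the ring was created with PACKET_TX_RING and mmap()ed,
 * and that the socket is already bound to an interface.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int tx_one_frame(int fd, unsigned char *ring,
			const struct tpacket_req *req, unsigned int slot,
			const void *frame, unsigned int len)
{
	struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
		(ring + (size_t)slot * req->tp_frame_size);

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;		/* slot still owned by the kernel */

	/* Data area used by tpacket_fill_skb(): tp_hdrlen minus sockaddr_ll */
	memcpy((unsigned char *)hdr + TPACKET2_HDRLEN -
	       sizeof(struct sockaddr_ll), frame, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	/* A zero-length send() drives the do/while loop in tpacket_snd() */
	return (int)send(fd, NULL, 0, 0);
}
#endif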

static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					size_t reserve, size_t len,
					size_t linear, int noblock,
					int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;
	int hlen, tlen;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (likely(saddr == NULL)) {
		dev	= packet_cached_dev_get(po);
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out_unlock;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_unlock;

	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		      vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;

		}
	}

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
		goto out_unlock;

	err = -ENOBUFS;
	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;

	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	packet_pick_tx_queue(dev, skb);

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	if (!packet_use_direct_xmit(po))
		skb_probe_transport_header(skb, reserve);
	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	err = po->xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}

/*
 * Close a PACKET socket. This is fairly simple. We immediately go
 * to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	mutex_lock(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	preempt_enable();

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	packet_cached_dev_reset(po);

	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);
	}

	fanout_release(sk);

	synchronize_net();
	/*
	 * Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	packet_free_pending(po);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}

/*
 * Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
{
	struct packet_sock *po = pkt_sk(sk);
	const struct net_device *dev_curr;
	__be16 proto_curr;
	bool need_rehook;

	if (po->fanout) {
		if (dev)
			dev_put(dev);

		return -EINVAL;
	}

	lock_sock(sk);
	spin_lock(&po->bind_lock);

	proto_curr = po->prot_hook.type;
	dev_curr = po->prot_hook.dev;

	need_rehook = proto_curr != proto || dev_curr != dev;

	if (need_rehook) {
		unregister_prot_hook(sk, true);

		po->num = proto;
		po->prot_hook.type = proto;

		if (po->prot_hook.dev)
			dev_put(po->prot_hook.dev);

		po->prot_hook.dev = dev;

		po->ifindex = dev ? dev->ifindex : 0;
		packet_cached_dev_assign(po, dev);
	}

	if (proto == 0 || !need_rehook)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}

/*
 * Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev)
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;


	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

out:
	return err;
}
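
/* Illustrative userspace sketch (not part of this file): binding a packet
 * socket to one interface, which lands in packet_bind()/packet_do_bind()
 * above. The interface name is a caller-supplied placeholder.
 */
#if 0
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int bind_to_iface(int fd, const char *ifname)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);	/* used as new proto if nonzero */
	sll.sll_ifindex = if_nametoindex(ifname);

	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif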

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;
	po->xmit = dev_queue_xmit;

	err = packet_alloc_pending(po);
	if (err)
		goto out2;

	packet_cached_dev_reset(po);

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	mutex_lock(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, &packet_proto, 1);
	preempt_enable();

	return 0;
out2:
	sk_free(sk);
out:
	return err;
}
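
/* Illustrative userspace sketch (not part of this file): the three socket
 * types packet_create() accepts. Note the protocol is passed in network
 * byte order ("weird, but documented"), and CAP_NET_RAW is required.
 */
#if 0
#include <linux/if_ether.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int open_packet_sockets(int fds[3])
{
	fds[0] = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));   /* ll header included */
	fds[1] = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));  /* ll header cooked */
	fds[2] = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));/* obsolete API */
	return (fds[0] < 0 || fds[1] < 0 || fds[2] < 0) ? -1 : 0;
}
#endif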

/*
 * Pull a packet from our receive queue and hand it to the user.
 * If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = sock_recv_errqueue(sk, msg, len,
					 SOL_PACKET, PACKET_TX_TIMESTAMP);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred, so return it. Because skb_recv_datagram()
	 *	handles the blocking, we don't need to see or worry about
	 *	blocking retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/* You lose any data beyond the buffer you gave. If it worries
	 * a user program they can ask the device for its MTU
	 * anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		/* If the address length field is there to be filled
		 * in, we fill it in now.
		 */
		if (sock->type == SOCK_PACKET) {
			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
			msg->msg_namelen = sizeof(struct sockaddr_pkt);
		} else {
			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
			msg->msg_namelen = sll->sll_halen +
				offsetof(struct sockaddr_ll, sll_addr);
		}
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);
	}

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			aux.tp_vlan_tci = 0;
			aux.tp_vlan_tpid = 0;
		}
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
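
/* Illustrative userspace sketch (not part of this file): enabling
 * PACKET_AUXDATA and reading the tpacket_auxdata control message that
 * packet_recvmsg() above attaches, e.g. to recover the VLAN tag.
 */
#if 0
#include <linux/if_packet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void recv_with_auxdata(int fd)
{
	int one = 1;
	unsigned char buf[2048];
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct tpacket_auxdata aux;

		if (cmsg->cmsg_level != SOL_PACKET ||
		    cmsg->cmsg_type != PACKET_AUXDATA)
			continue;
		memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
		if (aux.tp_status & TP_STATUS_VLAN_VALID)
			printf("VLAN tci %u\n", aux.tp_vlan_tci);
	}
}
#endif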

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk	= sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
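
/* Illustrative userspace sketch (not part of this file): using the
 * membership API handled by packet_mc_add()/packet_mc_drop() above to
 * put an interface into promiscuous mode, refcounted by the socket.
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int enable_promisc(int fd, int ifindex)
{
	struct packet_mreq mr;

	memset(&mr, 0, sizeof(mr));
	mr.mr_ifindex = ifindex;
	mr.mr_type = PACKET_MR_PROMISC;

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mr, sizeof(mr));
}
#endif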

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		union tpacket_req_u req_u;
		int len;

		switch (po->tp_version) {
		case TPACKET_V1:
		case TPACKET_V2:
			len = sizeof(req_u.req);
			break;
		case TPACKET_V3:
		default:
			len = sizeof(req_u.req3);
			break;
		}
		if (optlen < len)
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req_u.req, optval, len))
			return -EFAULT;
		return packet_set_ring(sk, &req_u, 0,
			optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
		case TPACKET_V3:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	case PACKET_TX_HAS_OFF:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_tx_has_off = !!val;
		return 0;
	}
	case PACKET_QDISC_BYPASS:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
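
/* Illustrative userspace sketch (not part of this file): joining a fanout
 * group via the PACKET_FANOUT case above. The low 16 bits carry the group
 * id and the high bits the mode, matching fanout_add(sk, val & 0xffff,
 * val >> 16).
 */
#if 0
#include <linux/if_packet.h>
#include <sys/socket.h>

static int join_fanout(int fd, unsigned short group_id)
{
	int val = group_id | (PACKET_FANOUT_HASH << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
}
#endif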

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val, lv = sizeof(val);
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	union tpacket_stats_u st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memcpy(&st, &po->stats, sizeof(st));
		memset(&po->stats, 0, sizeof(po->stats));
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		if (po->tp_version == TPACKET_V3) {
			lv = sizeof(struct tpacket_stats_v3);
			st.stats3.tp_packets += st.stats3.tp_drops;
			data = &st.stats3;
		} else {
			lv = sizeof(struct tpacket_stats);
			st.stats1.tp_packets += st.stats1.tp_drops;
			data = &st.stats1;
		}

		break;
	case PACKET_AUXDATA:
		val = po->auxdata;
		break;
	case PACKET_ORIGDEV:
		val = po->origdev;
		break;
	case PACKET_VNET_HDR:
		val = po->has_vnet_hdr;
		break;
	case PACKET_VERSION:
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		val = po->tp_reserve;
		break;
	case PACKET_LOSS:
		val = po->tp_loss;
		break;
	case PACKET_TIMESTAMP:
		val = po->tp_tstamp;
		break;
	case PACKET_FANOUT:
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16) |
			((u32)po->fanout->flags << 24)) :
		       0);
		break;
	case PACKET_TX_HAS_OFF:
		val = po->tp_tx_has_off;
		break;
	case PACKET_QDISC_BYPASS:
		val = packet_use_direct_xmit(po);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}


static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct sock *sk;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					po->ifindex = -1;
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
			TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}


/* Dirty? Well, I still have not learned a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
1da177e4 3732
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                int closing, int tx_ring)
{
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
        int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
        __be16 num;
        int err = -EINVAL;
        /* Alias req_u->req to minimize code churn */
        struct tpacket_req *req = &req_u->req;

        /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
        if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
                WARN(1, "Tx-ring is not supported.\n");
                goto out;
        }

        rb = tx_ring ? &po->tx_ring : &po->rx_ring;
        rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        err = -EBUSY;
        if (!closing) {
                if (atomic_read(&po->mapped))
                        goto out;
                if (packet_read_pending(rb))
                        goto out;
        }

        if (req->tp_block_nr) {
                /* Sanity tests and some calculations */
                err = -EBUSY;
                if (unlikely(rb->pg_vec))
                        goto out;

                switch (po->tp_version) {
                case TPACKET_V1:
                        po->tp_hdrlen = TPACKET_HDRLEN;
                        break;
                case TPACKET_V2:
                        po->tp_hdrlen = TPACKET2_HDRLEN;
                        break;
                case TPACKET_V3:
                        po->tp_hdrlen = TPACKET3_HDRLEN;
                        break;
                }

                err = -EINVAL;
                if (unlikely((int)req->tp_block_size <= 0))
                        goto out;
                if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
                        goto out;
                if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
                        goto out;

                rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
                if (unlikely(rb->frames_per_block <= 0))
                        goto out;
                if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                                        req->tp_frame_nr))
                        goto out;

                err = -ENOMEM;
                order = get_order(req->tp_block_size);
                pg_vec = alloc_pg_vec(req, order);
                if (unlikely(!pg_vec))
                        goto out;
                switch (po->tp_version) {
                case TPACKET_V3:
                        /* Transmit path is not supported. We checked
                         * it above but just being paranoid.
                         */
                        if (!tx_ring)
                                init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
                        break;
                default:
                        break;
                }
        } else {
                /* Done */
                err = -EINVAL;
                if (unlikely(req->tp_frame_nr))
                        goto out;
        }

        lock_sock(sk);

        /* Detach socket from network */
        spin_lock(&po->bind_lock);
        was_running = po->running;
        num = po->num;
        if (was_running) {
                po->num = 0;
                __unregister_prot_hook(sk, false);
        }
        spin_unlock(&po->bind_lock);

        synchronize_net();

        err = -EBUSY;
        mutex_lock(&po->pg_vec_lock);
        if (closing || atomic_read(&po->mapped) == 0) {
                err = 0;
                spin_lock_bh(&rb_queue->lock);
                swap(rb->pg_vec, pg_vec);
                rb->frame_max = (req->tp_frame_nr - 1);
                rb->head = 0;
                rb->frame_size = req->tp_frame_size;
                spin_unlock_bh(&rb_queue->lock);

                swap(rb->pg_vec_order, order);
                swap(rb->pg_vec_len, req->tp_block_nr);

                rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
                po->prot_hook.func = (po->rx_ring.pg_vec) ?
                                                tpacket_rcv : packet_rcv;
                skb_queue_purge(rb_queue);
                if (atomic_read(&po->mapped))
                        pr_err("packet_mmap: vma is busy: %d\n",
                               atomic_read(&po->mapped));
        }
        mutex_unlock(&po->pg_vec_lock);

        spin_lock(&po->bind_lock);
        if (was_running) {
                po->num = num;
                register_prot_hook(sk);
        }
        spin_unlock(&po->bind_lock);
        if (closing && (po->tp_version > TPACKET_V2)) {
                /* Because we don't support block-based V3 on tx-ring */
                if (!tx_ring)
                        prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
        }
        release_sock(sk);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
        return err;
}

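/*
 * Map the rx ring followed by the tx ring (whichever exist) into one
 * contiguous user VMA. Partial mappings are rejected: the VMA size must
 * exactly match the combined size of all configured rings.
 */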
static int packet_mmap(struct file *file, struct socket *sock,
                struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
        unsigned long size, expected_size;
        struct packet_ring_buffer *rb;
        unsigned long start;
        int err = -EINVAL;
        int i;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&po->pg_vec_lock);

        expected_size = 0;
        for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
                if (rb->pg_vec) {
                        expected_size += rb->pg_vec_len
                                                * rb->pg_vec_pages
                                                * PAGE_SIZE;
                }
        }

        if (expected_size == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected_size)
                goto out;

        start = vma->vm_start;
        for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
                if (rb->pg_vec == NULL)
                        continue;

                for (i = 0; i < rb->pg_vec_len; i++) {
                        struct page *page;
                        void *kaddr = rb->pg_vec[i].buffer;
                        int pg_num;

                        for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
                                page = pgv_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (unlikely(err))
                                        goto out;
                                start += PAGE_SIZE;
                                kaddr += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&po->mapped);
        vma->vm_ops = &packet_mmap_ops;
        err = 0;

out:
        mutex_unlock(&po->pg_vec_lock);
        return err;
}

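/*
 * Socket operations for the legacy SOCK_PACKET interface: no socket
 * options and no ring mmap, send/recv only.
 */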
static const struct proto_ops packet_ops_spkt = {
        .family = PF_PACKET,
        .owner = THIS_MODULE,
        .release = packet_release,
        .bind = packet_bind_spkt,
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = packet_getname_spkt,
        .poll = datagram_poll,
        .ioctl = packet_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = packet_sendmsg_spkt,
        .recvmsg = packet_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

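/*
 * Full AF_PACKET (SOCK_RAW/SOCK_DGRAM) operations, including socket
 * options and ring mmap support.
 */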
static const struct proto_ops packet_ops = {
        .family = PF_PACKET,
        .owner = THIS_MODULE,
        .release = packet_release,
        .bind = packet_bind,
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = packet_getname,
        .poll = packet_poll,
        .ioctl = packet_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = packet_setsockopt,
        .getsockopt = packet_getsockopt,
        .sendmsg = packet_sendmsg,
        .recvmsg = packet_recvmsg,
        .mmap = packet_mmap,
        .sendpage = sock_no_sendpage,
};

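/* PF_PACKET socket creation entry point and netdevice event handler. */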
static const struct net_proto_family packet_family_ops = {
        .family = PF_PACKET,
        .create = packet_create,
        .owner = THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
        .notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

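/*
 * /proc/net/packet: iterate the per-namespace packet socket list under
 * RCU and print one line per socket.
 */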
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct net *net = seq_file_net(seq);

        rcu_read_lock();
        return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
        else {
                struct sock *s = sk_entry(v);
                const struct packet_sock *po = pkt_sk(s);

                seq_printf(seq,
                           "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
                           s,
                           atomic_read(&s->sk_refcnt),
                           s->sk_type,
                           ntohs(po->num),
                           po->ifindex,
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
                           sock_i_ino(s));
        }

        return 0;
}

static const struct seq_operations packet_seq_ops = {
        .start = packet_seq_start,
        .next = packet_seq_next,
        .stop = packet_seq_stop,
        .show = packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &packet_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
        .owner = THIS_MODULE,
        .open = packet_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

#endif

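/*
 * Per-network-namespace setup: each namespace gets its own socket list
 * and its own /proc/net/packet entry.
 */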
static int __net_init packet_net_init(struct net *net)
{
        mutex_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);

        if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
                return -ENOMEM;

        return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
        remove_proc_entry("packet", net->proc_net);
}

static struct pernet_operations packet_net_ops = {
        .init = packet_net_init,
        .exit = packet_net_exit,
};


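/*
 * Module init/exit: register the protocol, the PF_PACKET socket family,
 * the per-namespace hooks, and the netdevice notifier; teardown runs in
 * the reverse order.
 */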
static void __exit packet_exit(void)
{
        unregister_netdevice_notifier(&packet_netdev_notifier);
        unregister_pernet_subsys(&packet_net_ops);
        sock_unregister(PF_PACKET);
        proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
        int rc = proto_register(&packet_proto, 0);

        if (rc != 0)
                goto out;

        sock_register(&packet_family_ops);
        register_pernet_subsys(&packet_net_ops);
        register_netdevice_notifier(&packet_netdev_notifier);
out:
        return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);