[deliverable/linux.git] net/packet/af_packet.c
1da177e4
LT
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * PACKET - implements raw packet sockets.
7 *
02c30a84 8 * Authors: Ross Biro
1da177e4
LT
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 *
1ce4f28b 12 * Fixes:
1da177e4
LT
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
1ce4f28b 35 * Ulises Alonso : Frame number limit removal and
1da177e4 36 * packet_set_ring memory leak.
0fb375fb
EB
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
1ce4f28b 40 * byte arrays at the end of sockaddr_ll
0fb375fb 41 * and packet_mreq.
69e3c75f 42 * Johann Baudy : Added TX RING.
f6fb8f10 43 * Chetan Loke : Implemented TPACKET_V3 block abstraction
44 * layer.
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
46 *
1da177e4
LT
47 *
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
52 *
53 */
1ce4f28b 54
1da177e4 55#include <linux/types.h>
1da177e4 56#include <linux/mm.h>
4fc268d2 57#include <linux/capability.h>
1da177e4
LT
58#include <linux/fcntl.h>
59#include <linux/socket.h>
60#include <linux/in.h>
61#include <linux/inet.h>
62#include <linux/netdevice.h>
63#include <linux/if_packet.h>
64#include <linux/wireless.h>
ffbc6111 65#include <linux/kernel.h>
1da177e4 66#include <linux/kmod.h>
5a0e3ad6 67#include <linux/slab.h>
0e3125c7 68#include <linux/vmalloc.h>
457c4cbc 69#include <net/net_namespace.h>
1da177e4
LT
70#include <net/ip.h>
71#include <net/protocol.h>
72#include <linux/skbuff.h>
73#include <net/sock.h>
74#include <linux/errno.h>
75#include <linux/timer.h>
1da177e4
LT
76#include <asm/uaccess.h>
77#include <asm/ioctls.h>
78#include <asm/page.h>
a1f8e7f7 79#include <asm/cacheflush.h>
1da177e4
LT
80#include <asm/io.h>
81#include <linux/proc_fs.h>
82#include <linux/seq_file.h>
83#include <linux/poll.h>
84#include <linux/module.h>
85#include <linux/init.h>
905db440 86#include <linux/mutex.h>
05423b24 87#include <linux/if_vlan.h>
bfd5f4a3 88#include <linux/virtio_net.h>
ed85b565 89#include <linux/errqueue.h>
614f60fa 90#include <linux/net_tstamp.h>
b0138408 91#include <linux/percpu.h>
1da177e4
LT
92#ifdef CONFIG_INET
93#include <net/inet_common.h>
94#endif
47dceb8e 95#include <linux/bpf.h>
719c44d3 96#include <net/compat.h>
1da177e4 97
2787b04b
PE
98#include "internal.h"
99
1da177e4
LT
100/*
101 Assumptions:
102 - if device has no dev->hard_header routine, it adds and removes ll header
103 inside itself. In this case ll header is invisible outside of device,
 104 but higher levels should still reserve dev->hard_header_len.
 105 Some devices are clever enough to reallocate the skb when the header
 106 will not fit in the reserved space (tunnel); others are silly
 107 (PPP).
108 - packet socket receives packets with pulled ll header,
109 so that SOCK_RAW should push it back.
110
111On receive:
112-----------
113
114Incoming, dev->hard_header!=NULL
b0e380b1
ACM
115 mac_header -> ll header
116 data -> data
1da177e4
LT
117
118Outgoing, dev->hard_header!=NULL
b0e380b1
ACM
119 mac_header -> ll header
120 data -> ll header
1da177e4
LT
121
122Incoming, dev->hard_header==NULL
b0e380b1
ACM
 123 mac_header -> UNKNOWN position. It is very likely that it points to the ll
 124 header. PPP does this, which is wrong, because it introduces
db0c58f9 125 asymmetry between the rx and tx paths.
b0e380b1 126 data -> data
1da177e4
LT
127
128Outgoing, dev->hard_header==NULL
b0e380b1
ACM
129 mac_header -> data. ll header is still not built!
130 data -> data
1da177e4
LT
131
132Summary
 133 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
134
135
136On transmit:
137------------
138
139dev->hard_header != NULL
b0e380b1
ACM
140 mac_header -> ll header
141 data -> ll header
1da177e4
LT
142
143dev->hard_header == NULL (ll header is added by device, we cannot control it)
b0e380b1
ACM
144 mac_header -> data
145 data -> data
1da177e4
LT
146
 147 We should set nh.raw on output to the correct position,
 148 as the packet classifier depends on it.
149 */
150
1da177e4
LT
151/* Private packet socket structures. */
152
0fb375fb
EB
153/* identical to struct packet_mreq except it has
154 * a longer address field.
155 */
40d4e3df 156struct packet_mreq_max {
0fb375fb
EB
157 int mr_ifindex;
158 unsigned short mr_type;
159 unsigned short mr_alen;
160 unsigned char mr_address[MAX_ADDR_LEN];
1da177e4 161};
a2efcfa0 162
184f489e
DB
163union tpacket_uhdr {
164 struct tpacket_hdr *h1;
165 struct tpacket2_hdr *h2;
166 struct tpacket3_hdr *h3;
167 void *raw;
168};
169
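/* tpacket_uhdr lets one ring-frame pointer be viewed through whichever
 * header layout matches po->tp_version; the status/timestamp helpers
 * further below pick h1, h2 or h3 by switching on that version.
 */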
f6fb8f10 170static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
69e3c75f
JB
171 int closing, int tx_ring);
172
f6fb8f10 173#define V3_ALIGNMENT (8)
174
bc59ba39 175#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
f6fb8f10 176
177#define BLK_PLUS_PRIV(sz_of_priv) \
178 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
179
f6fb8f10 180#define PGV_FROM_VMALLOC 1
69e3c75f 181
f6fb8f10 182#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
183#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
184#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
185#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
186#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
187#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
188#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
189
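/* Rough TPACKET_V3 block layout implied by the accessors above (the
 * offsets are filled in by prb_open_block() further below):
 *
 *   +-------------------------------+ 0
 *   | struct tpacket_block_desc     |
 *   +-------------------------------+ offset_to_priv  == BLK_HDR_LEN
 *   | per-block private area        |  tp_sizeof_priv bytes (aligned)
 *   +-------------------------------+ offset_to_first_pkt == BLK_PLUS_PRIV(priv)
 *   | tpacket3_hdr + packet data    |  frames chained via tp_next_offset
 *   | ...                           |
 *   +-------------------------------+ tp_block_size
 */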
69e3c75f
JB
190struct packet_sock;
191static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
77f65ebd
WB
192static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
193 struct packet_type *pt, struct net_device *orig_dev);
1da177e4 194
f6fb8f10 195static void *packet_previous_frame(struct packet_sock *po,
196 struct packet_ring_buffer *rb,
197 int status);
198static void packet_increment_head(struct packet_ring_buffer *buff);
bc59ba39 199static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
200 struct tpacket_block_desc *);
201static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
f6fb8f10 202 struct packet_sock *);
bc59ba39 203static void prb_retire_current_block(struct tpacket_kbdq_core *,
f6fb8f10 204 struct packet_sock *, unsigned int status);
bc59ba39 205static int prb_queue_frozen(struct tpacket_kbdq_core *);
206static void prb_open_block(struct tpacket_kbdq_core *,
207 struct tpacket_block_desc *);
f6fb8f10 208static void prb_retire_rx_blk_timer_expired(unsigned long);
bc59ba39 209static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
210static void prb_init_blk_timer(struct packet_sock *,
211 struct tpacket_kbdq_core *,
212 void (*func) (unsigned long));
213static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
214static void prb_clear_rxhash(struct tpacket_kbdq_core *,
215 struct tpacket3_hdr *);
216static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
217 struct tpacket3_hdr *);
1da177e4
LT
218static void packet_flush_mclist(struct sock *sk);
219
ffbc6111 220struct packet_skb_cb {
ffbc6111
HX
221 union {
222 struct sockaddr_pkt pkt;
2472d761
EB
223 union {
224 /* Trick: alias skb original length with
225 * ll.sll_family and ll.protocol in order
226 * to save room.
227 */
228 unsigned int origlen;
229 struct sockaddr_ll ll;
230 };
ffbc6111
HX
231 } sa;
232};
233
d3869efe
DW
234#define vio_le() virtio_legacy_is_little_endian()
235
ffbc6111 236#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
8dc41944 237
bc59ba39 238#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
f6fb8f10 239#define GET_PBLOCK_DESC(x, bid) \
bc59ba39 240 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
f6fb8f10 241#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
bc59ba39 242 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
f6fb8f10 243#define GET_NEXT_PRB_BLK_NUM(x) \
244 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
245 ((x)->kactive_blk_num+1) : 0)
246
dc99f600
DM
247static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
248static void __fanout_link(struct sock *sk, struct packet_sock *po);
249
d346a3fa
DB
250static int packet_direct_xmit(struct sk_buff *skb)
251{
252 struct net_device *dev = skb->dev;
d346a3fa
DB
253 netdev_features_t features;
254 struct netdev_queue *txq;
43279500 255 int ret = NETDEV_TX_BUSY;
d346a3fa
DB
256
257 if (unlikely(!netif_running(dev) ||
43279500
DB
258 !netif_carrier_ok(dev)))
259 goto drop;
d346a3fa
DB
260
261 features = netif_skb_features(skb);
262 if (skb_needs_linearize(skb, features) &&
43279500
DB
263 __skb_linearize(skb))
264 goto drop;
d346a3fa 265
10c51b56 266 txq = skb_get_tx_queue(dev, skb);
d346a3fa 267
43279500
DB
268 local_bh_disable();
269
270 HARD_TX_LOCK(dev, txq, smp_processor_id());
10b3ad8c 271 if (!netif_xmit_frozen_or_drv_stopped(txq))
fa2dbdc2 272 ret = netdev_start_xmit(skb, dev, txq, false);
43279500 273 HARD_TX_UNLOCK(dev, txq);
d346a3fa 274
43279500
DB
275 local_bh_enable();
276
277 if (!dev_xmit_complete(ret))
d346a3fa 278 kfree_skb(skb);
43279500 279
d346a3fa 280 return ret;
43279500 281drop:
0f97ede4 282 atomic_long_inc(&dev->tx_dropped);
43279500
DB
283 kfree_skb(skb);
284 return NET_XMIT_DROP;
d346a3fa
DB
285}
286
66e56cd4
DB
287static struct net_device *packet_cached_dev_get(struct packet_sock *po)
288{
289 struct net_device *dev;
290
291 rcu_read_lock();
292 dev = rcu_dereference(po->cached_dev);
293 if (likely(dev))
294 dev_hold(dev);
295 rcu_read_unlock();
296
297 return dev;
298}
299
300static void packet_cached_dev_assign(struct packet_sock *po,
301 struct net_device *dev)
302{
303 rcu_assign_pointer(po->cached_dev, dev);
304}
305
306static void packet_cached_dev_reset(struct packet_sock *po)
307{
308 RCU_INIT_POINTER(po->cached_dev, NULL);
309}
310
d346a3fa
DB
311static bool packet_use_direct_xmit(const struct packet_sock *po)
312{
313 return po->xmit == packet_direct_xmit;
314}
315
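/* Note: po->xmit is dev_queue_xmit() by default; it only points at
 * packet_direct_xmit() when the socket opts in with the PACKET_QDISC_BYPASS
 * socket option (handled in packet_setsockopt(), further down this file),
 * in which case frames bypass the qdisc layer entirely.
 */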
0fd5d57b 316static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
d346a3fa 317{
1cbac010 318 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
d346a3fa
DB
319}
320
0fd5d57b
DB
321static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
322{
323 const struct net_device_ops *ops = dev->netdev_ops;
324 u16 queue_index;
325
326 if (ops->ndo_select_queue) {
327 queue_index = ops->ndo_select_queue(dev, skb, NULL,
328 __packet_pick_tx_queue);
329 queue_index = netdev_cap_txqueue(dev, queue_index);
330 } else {
331 queue_index = __packet_pick_tx_queue(dev, skb);
332 }
333
334 skb_set_queue_mapping(skb, queue_index);
335}
336
ce06b03e
DM
337/* register_prot_hook must be invoked with the po->bind_lock held,
338 * or from a context in which asynchronous accesses to the packet
339 * socket is not possible (packet_create()).
340 */
341static void register_prot_hook(struct sock *sk)
342{
343 struct packet_sock *po = pkt_sk(sk);
e40526cb 344
ce06b03e 345 if (!po->running) {
66e56cd4 346 if (po->fanout)
dc99f600 347 __fanout_link(sk, po);
66e56cd4 348 else
dc99f600 349 dev_add_pack(&po->prot_hook);
e40526cb 350
ce06b03e
DM
351 sock_hold(sk);
352 po->running = 1;
353 }
354}
355
356/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
357 * held. If the sync parameter is true, we will temporarily drop
358 * the po->bind_lock and do a synchronize_net to make sure no
359 * asynchronous packet processing paths still refer to the elements
360 * of po->prot_hook. If the sync parameter is false, it is the
361 * callers responsibility to take care of this.
362 */
363static void __unregister_prot_hook(struct sock *sk, bool sync)
364{
365 struct packet_sock *po = pkt_sk(sk);
366
367 po->running = 0;
66e56cd4
DB
368
369 if (po->fanout)
dc99f600 370 __fanout_unlink(sk, po);
66e56cd4 371 else
dc99f600 372 __dev_remove_pack(&po->prot_hook);
e40526cb 373
ce06b03e
DM
374 __sock_put(sk);
375
376 if (sync) {
377 spin_unlock(&po->bind_lock);
378 synchronize_net();
379 spin_lock(&po->bind_lock);
380 }
381}
382
383static void unregister_prot_hook(struct sock *sk, bool sync)
384{
385 struct packet_sock *po = pkt_sk(sk);
386
387 if (po->running)
388 __unregister_prot_hook(sk, sync);
389}
390
6e58040b 391static inline struct page * __pure pgv_to_page(void *addr)
0af55bb5
CG
392{
393 if (is_vmalloc_addr(addr))
394 return vmalloc_to_page(addr);
395 return virt_to_page(addr);
396}
397
69e3c75f 398static void __packet_set_status(struct packet_sock *po, void *frame, int status)
1da177e4 399{
184f489e 400 union tpacket_uhdr h;
1da177e4 401
69e3c75f 402 h.raw = frame;
bbd6ef87
PM
403 switch (po->tp_version) {
404 case TPACKET_V1:
69e3c75f 405 h.h1->tp_status = status;
0af55bb5 406 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
bbd6ef87
PM
407 break;
408 case TPACKET_V2:
69e3c75f 409 h.h2->tp_status = status;
0af55bb5 410 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
bbd6ef87 411 break;
f6fb8f10 412 case TPACKET_V3:
69e3c75f 413 default:
f6fb8f10 414 WARN(1, "TPACKET version not supported.\n");
69e3c75f 415 BUG();
bbd6ef87 416 }
69e3c75f
JB
417
418 smp_wmb();
bbd6ef87
PM
419}
420
69e3c75f 421static int __packet_get_status(struct packet_sock *po, void *frame)
bbd6ef87 422{
184f489e 423 union tpacket_uhdr h;
bbd6ef87 424
69e3c75f
JB
425 smp_rmb();
426
bbd6ef87
PM
427 h.raw = frame;
428 switch (po->tp_version) {
429 case TPACKET_V1:
0af55bb5 430 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
69e3c75f 431 return h.h1->tp_status;
bbd6ef87 432 case TPACKET_V2:
0af55bb5 433 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
69e3c75f 434 return h.h2->tp_status;
f6fb8f10 435 case TPACKET_V3:
69e3c75f 436 default:
f6fb8f10 437 WARN(1, "TPACKET version not supported.\n");
69e3c75f
JB
438 BUG();
439 return 0;
bbd6ef87 440 }
1da177e4 441}
69e3c75f 442
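/* These two helpers implement the ring ownership handshake with user space:
 * an RX frame is kernel-owned while tp_status == TP_STATUS_KERNEL, is handed
 * over by setting TP_STATUS_USER (plus any extra TP_STATUS_* bits), and is
 * given back by user space writing TP_STATUS_KERNEL again.  On the TX ring
 * the roles are TP_STATUS_SEND_REQUEST (user) and TP_STATUS_AVAILABLE
 * (kernel).  The smp_wmb()/smp_rmb() pair orders the status word against the
 * frame contents.
 */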
b9c32fb2
DB
443static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
444 unsigned int flags)
7a51384c
DB
445{
446 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
447
68a360e8
WB
448 if (shhwtstamps &&
449 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
450 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
451 return TP_STATUS_TS_RAW_HARDWARE;
7a51384c
DB
452
453 if (ktime_to_timespec_cond(skb->tstamp, ts))
b9c32fb2 454 return TP_STATUS_TS_SOFTWARE;
7a51384c 455
b9c32fb2 456 return 0;
7a51384c
DB
457}
458
b9c32fb2
DB
459static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
460 struct sk_buff *skb)
2e31396f
WB
461{
462 union tpacket_uhdr h;
463 struct timespec ts;
b9c32fb2 464 __u32 ts_status;
2e31396f 465
b9c32fb2
DB
466 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
467 return 0;
2e31396f
WB
468
469 h.raw = frame;
470 switch (po->tp_version) {
471 case TPACKET_V1:
472 h.h1->tp_sec = ts.tv_sec;
473 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
474 break;
475 case TPACKET_V2:
476 h.h2->tp_sec = ts.tv_sec;
477 h.h2->tp_nsec = ts.tv_nsec;
478 break;
479 case TPACKET_V3:
480 default:
481 WARN(1, "TPACKET version not supported.\n");
482 BUG();
483 }
484
485 /* one flush is safe, as both fields always lie on the same cacheline */
486 flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
487 smp_wmb();
b9c32fb2
DB
488
489 return ts_status;
2e31396f
WB
490}
491
69e3c75f
JB
492static void *packet_lookup_frame(struct packet_sock *po,
493 struct packet_ring_buffer *rb,
494 unsigned int position,
495 int status)
496{
497 unsigned int pg_vec_pos, frame_offset;
184f489e 498 union tpacket_uhdr h;
69e3c75f
JB
499
500 pg_vec_pos = position / rb->frames_per_block;
501 frame_offset = position % rb->frames_per_block;
502
0e3125c7
NH
503 h.raw = rb->pg_vec[pg_vec_pos].buffer +
504 (frame_offset * rb->frame_size);
69e3c75f
JB
505
506 if (status != __packet_get_status(po, h.raw))
507 return NULL;
508
509 return h.raw;
510}
511
eea49cc9 512static void *packet_current_frame(struct packet_sock *po,
69e3c75f
JB
513 struct packet_ring_buffer *rb,
514 int status)
515{
516 return packet_lookup_frame(po, rb, rb->head, status);
517}
518
bc59ba39 519static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
f6fb8f10 520{
521 del_timer_sync(&pkc->retire_blk_timer);
522}
523
524static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
f6fb8f10 525 struct sk_buff_head *rb_queue)
526{
bc59ba39 527 struct tpacket_kbdq_core *pkc;
f6fb8f10 528
73d0fcf2 529 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
f6fb8f10 530
ec6f809f 531 spin_lock_bh(&rb_queue->lock);
f6fb8f10 532 pkc->delete_blk_timer = 1;
ec6f809f 533 spin_unlock_bh(&rb_queue->lock);
f6fb8f10 534
535 prb_del_retire_blk_timer(pkc);
536}
537
538static void prb_init_blk_timer(struct packet_sock *po,
bc59ba39 539 struct tpacket_kbdq_core *pkc,
f6fb8f10 540 void (*func) (unsigned long))
541{
542 init_timer(&pkc->retire_blk_timer);
543 pkc->retire_blk_timer.data = (long)po;
544 pkc->retire_blk_timer.function = func;
545 pkc->retire_blk_timer.expires = jiffies;
546}
547
e8e85cc5 548static void prb_setup_retire_blk_timer(struct packet_sock *po)
f6fb8f10 549{
bc59ba39 550 struct tpacket_kbdq_core *pkc;
f6fb8f10 551
e8e85cc5 552 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
f6fb8f10 553 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
554}
555
556static int prb_calc_retire_blk_tmo(struct packet_sock *po,
557 int blk_size_in_bytes)
558{
559 struct net_device *dev;
560 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
7cad1bac 561 struct ethtool_link_ksettings ecmd;
4bc71cb9 562 int err;
f6fb8f10 563
4bc71cb9
JP
564 rtnl_lock();
565 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
566 if (unlikely(!dev)) {
567 rtnl_unlock();
f6fb8f10 568 return DEFAULT_PRB_RETIRE_TOV;
4bc71cb9 569 }
7cad1bac 570 err = __ethtool_get_link_ksettings(dev, &ecmd);
4bc71cb9
JP
571 rtnl_unlock();
572 if (!err) {
4bc71cb9
JP
573 /*
 574 * If the link speed is so slow, you don't really
 575 * need to worry about perf anyway.
576 */
7cad1bac
DD
577 if (ecmd.base.speed < SPEED_1000 ||
578 ecmd.base.speed == SPEED_UNKNOWN) {
4bc71cb9 579 return DEFAULT_PRB_RETIRE_TOV;
e440cf2c 580 } else {
581 msec = 1;
7cad1bac 582 div = ecmd.base.speed / 1000;
f6fb8f10 583 }
584 }
585
586 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
587
588 if (div)
589 mbits /= div;
590
591 tmo = mbits * msec;
592
593 if (div)
594 return tmo+1;
595 return tmo;
596}
597
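/* Worked example of the calculation above, assuming SPEED_1000 and the
 * common 1 MiB block size: msec = 1, div = 1000/1000 = 1,
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, tmo = 8 * 1 = 8, and since
 * div != 0 the function returns 9 ms, roughly the time needed to fill one
 * block on a 1 Gbps link (see the timer comment further below).
 */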
bc59ba39 598static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
f6fb8f10 599 union tpacket_req_u *req_u)
600{
601 p1->feature_req_word = req_u->req3.tp_feature_req_word;
602}
603
604static void init_prb_bdqc(struct packet_sock *po,
605 struct packet_ring_buffer *rb,
606 struct pgv *pg_vec,
e8e85cc5 607 union tpacket_req_u *req_u)
f6fb8f10 608{
22781a5b 609 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
bc59ba39 610 struct tpacket_block_desc *pbd;
f6fb8f10 611
612 memset(p1, 0x0, sizeof(*p1));
613
614 p1->knxt_seq_num = 1;
615 p1->pkbdq = pg_vec;
bc59ba39 616 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
e3192690 617 p1->pkblk_start = pg_vec[0].buffer;
f6fb8f10 618 p1->kblk_size = req_u->req3.tp_block_size;
619 p1->knum_blocks = req_u->req3.tp_block_nr;
620 p1->hdrlen = po->tp_hdrlen;
621 p1->version = po->tp_version;
622 p1->last_kactive_blk_num = 0;
ee80fbf3 623 po->stats.stats3.tp_freeze_q_cnt = 0;
f6fb8f10 624 if (req_u->req3.tp_retire_blk_tov)
625 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
626 else
627 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
628 req_u->req3.tp_block_size);
629 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
630 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
631
dc808110 632 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
f6fb8f10 633 prb_init_ft_ops(p1, req_u);
e8e85cc5 634 prb_setup_retire_blk_timer(po);
f6fb8f10 635 prb_open_block(p1, pbd);
636}
637
638/* Do NOT update the last_blk_num first.
639 * Assumes sk_buff_head lock is held.
640 */
bc59ba39 641static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
f6fb8f10 642{
643 mod_timer(&pkc->retire_blk_timer,
644 jiffies + pkc->tov_in_jiffies);
645 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
646}
647
648/*
649 * Timer logic:
650 * 1) We refresh the timer only when we open a block.
651 * By doing this we don't waste cycles refreshing the timer
 652 * on a packet-by-packet basis.
653 *
654 * With a 1MB block-size, on a 1Gbps line, it will take
655 * i) ~8 ms to fill a block + ii) memcpy etc.
656 * In this cut we are not accounting for the memcpy time.
657 *
658 * So, if the user sets the 'tmo' to 10ms then the timer
659 * will never fire while the block is still getting filled
660 * (which is what we want). However, the user could choose
661 * to close a block early and that's fine.
662 *
663 * But when the timer does fire, we check whether or not to refresh it.
664 * Since the tmo granularity is in msecs, it is not too expensive
 665 * to refresh the timer, let's say every '8' msecs.
666 * Either the user can set the 'tmo' or we can derive it based on
667 * a) line-speed and b) block-size.
668 * prb_calc_retire_blk_tmo() calculates the tmo.
669 *
670 */
671static void prb_retire_rx_blk_timer_expired(unsigned long data)
672{
673 struct packet_sock *po = (struct packet_sock *)data;
22781a5b 674 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
f6fb8f10 675 unsigned int frozen;
bc59ba39 676 struct tpacket_block_desc *pbd;
f6fb8f10 677
678 spin_lock(&po->sk.sk_receive_queue.lock);
679
680 frozen = prb_queue_frozen(pkc);
681 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
682
683 if (unlikely(pkc->delete_blk_timer))
684 goto out;
685
686 /* We only need to plug the race when the block is partially filled.
687 * tpacket_rcv:
688 * lock(); increment BLOCK_NUM_PKTS; unlock()
689 * copy_bits() is in progress ...
690 * timer fires on other cpu:
691 * we can't retire the current block because copy_bits
692 * is in progress.
693 *
694 */
695 if (BLOCK_NUM_PKTS(pbd)) {
696 while (atomic_read(&pkc->blk_fill_in_prog)) {
697 /* Waiting for skb_copy_bits to finish... */
698 cpu_relax();
699 }
700 }
701
702 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
703 if (!frozen) {
41a50d62
AD
704 if (!BLOCK_NUM_PKTS(pbd)) {
705 /* An empty block. Just refresh the timer. */
706 goto refresh_timer;
707 }
f6fb8f10 708 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
709 if (!prb_dispatch_next_block(pkc, po))
710 goto refresh_timer;
711 else
712 goto out;
713 } else {
714 /* Case 1. Queue was frozen because user-space was
715 * lagging behind.
716 */
717 if (prb_curr_blk_in_use(pkc, pbd)) {
718 /*
719 * Ok, user-space is still behind.
720 * So just refresh the timer.
721 */
722 goto refresh_timer;
723 } else {
 724 /* Case 2. The queue was frozen, user-space caught up,
 725 * and now the link went idle && the timer fired.
 726 * We don't have a block to close, so we open this
 727 * block and restart the timer.
 728 * Opening a block thaws the queue and restarts the timer;
 729 * thawing/timer-refresh is a side effect.
730 */
731 prb_open_block(pkc, pbd);
732 goto out;
733 }
734 }
735 }
736
737refresh_timer:
738 _prb_refresh_rx_retire_blk_timer(pkc);
739
740out:
741 spin_unlock(&po->sk.sk_receive_queue.lock);
742}
743
eea49cc9 744static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
bc59ba39 745 struct tpacket_block_desc *pbd1, __u32 status)
f6fb8f10 746{
747 /* Flush everything minus the block header */
748
749#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
750 u8 *start, *end;
751
752 start = (u8 *)pbd1;
753
 754 /* Skip the block header (we know the header WILL fit in 4K) */
755 start += PAGE_SIZE;
756
757 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
758 for (; start < end; start += PAGE_SIZE)
759 flush_dcache_page(pgv_to_page(start));
760
761 smp_wmb();
762#endif
763
764 /* Now update the block status. */
765
766 BLOCK_STATUS(pbd1) = status;
767
768 /* Flush the block header */
769
770#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
771 start = (u8 *)pbd1;
772 flush_dcache_page(pgv_to_page(start));
773
774 smp_wmb();
775#endif
776}
777
778/*
779 * Side effect:
780 *
781 * 1) flush the block
782 * 2) Increment active_blk_num
783 *
 784 * Note: We DON'T refresh the timer on purpose,
 785 * because almost always the next block will be opened.
786 */
bc59ba39 787static void prb_close_block(struct tpacket_kbdq_core *pkc1,
788 struct tpacket_block_desc *pbd1,
f6fb8f10 789 struct packet_sock *po, unsigned int stat)
790{
791 __u32 status = TP_STATUS_USER | stat;
792
793 struct tpacket3_hdr *last_pkt;
bc59ba39 794 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
da413eec 795 struct sock *sk = &po->sk;
f6fb8f10 796
ee80fbf3 797 if (po->stats.stats3.tp_drops)
f6fb8f10 798 status |= TP_STATUS_LOSING;
799
800 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
801 last_pkt->tp_next_offset = 0;
802
803 /* Get the ts of the last pkt */
804 if (BLOCK_NUM_PKTS(pbd1)) {
805 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
806 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
807 } else {
41a50d62
AD
808 /* Ok, we tmo'd - so get the current time.
809 *
810 * It shouldn't really happen as we don't close empty
811 * blocks. See prb_retire_rx_blk_timer_expired().
812 */
f6fb8f10 813 struct timespec ts;
814 getnstimeofday(&ts);
815 h1->ts_last_pkt.ts_sec = ts.tv_sec;
816 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
817 }
818
819 smp_wmb();
820
821 /* Flush the block */
822 prb_flush_block(pkc1, pbd1, status);
823
da413eec
DC
824 sk->sk_data_ready(sk);
825
f6fb8f10 826 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
827}
828
eea49cc9 829static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
f6fb8f10 830{
831 pkc->reset_pending_on_curr_blk = 0;
832}
833
834/*
835 * Side effect of opening a block:
836 *
837 * 1) prb_queue is thawed.
838 * 2) retire_blk_timer is refreshed.
839 *
840 */
bc59ba39 841static void prb_open_block(struct tpacket_kbdq_core *pkc1,
842 struct tpacket_block_desc *pbd1)
f6fb8f10 843{
844 struct timespec ts;
bc59ba39 845 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
f6fb8f10 846
847 smp_rmb();
848
8da3056c
DB
 849 /* We could have just memset this, but we would lose the
 850 * flexibility of making the priv area sticky
851 */
f6fb8f10 852
8da3056c
DB
853 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
854 BLOCK_NUM_PKTS(pbd1) = 0;
855 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
f6fb8f10 856
8da3056c
DB
857 getnstimeofday(&ts);
858
859 h1->ts_first_pkt.ts_sec = ts.tv_sec;
860 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
f6fb8f10 861
8da3056c
DB
862 pkc1->pkblk_start = (char *)pbd1;
863 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
864
865 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
866 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
867
868 pbd1->version = pkc1->version;
869 pkc1->prev = pkc1->nxt_offset;
870 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
871
872 prb_thaw_queue(pkc1);
873 _prb_refresh_rx_retire_blk_timer(pkc1);
874
875 smp_wmb();
f6fb8f10 876}
877
878/*
879 * Queue freeze logic:
880 * 1) Assume tp_block_nr = 8 blocks.
881 * 2) At time 't0', user opens Rx ring.
882 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
883 * 4) user-space is either sleeping or processing block '0'.
884 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 885 * it will close block-7, loop around and try to fill block '0'.
886 * call-flow:
887 * __packet_lookup_frame_in_block
888 * prb_retire_current_block()
889 * prb_dispatch_next_block()
890 * |->(BLOCK_STATUS == USER) evaluates to true
891 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
892 * 6) Now there are two cases:
893 * 6.1) Link goes idle right after the queue is frozen.
894 * But remember, the last open_block() refreshed the timer.
 895 * When this timer expires, it will refresh itself so that we can
896 * re-open block-0 in near future.
897 * 6.2) Link is busy and keeps on receiving packets. This is a simple
898 * case and __packet_lookup_frame_in_block will check if block-0
899 * is free and can now be re-used.
900 */
eea49cc9 901static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
f6fb8f10 902 struct packet_sock *po)
903{
904 pkc->reset_pending_on_curr_blk = 1;
ee80fbf3 905 po->stats.stats3.tp_freeze_q_cnt++;
f6fb8f10 906}
907
908#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
909
910/*
911 * If the next block is free then we will dispatch it
912 * and return a good offset.
913 * Else, we will freeze the queue.
914 * So, caller must check the return value.
915 */
bc59ba39 916static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
f6fb8f10 917 struct packet_sock *po)
918{
bc59ba39 919 struct tpacket_block_desc *pbd;
f6fb8f10 920
921 smp_rmb();
922
923 /* 1. Get current block num */
924 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
925
926 /* 2. If this block is currently in_use then freeze the queue */
927 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
928 prb_freeze_queue(pkc, po);
929 return NULL;
930 }
931
932 /*
933 * 3.
934 * open this block and return the offset where the first packet
935 * needs to get stored.
936 */
937 prb_open_block(pkc, pbd);
938 return (void *)pkc->nxt_offset;
939}
940
bc59ba39 941static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
f6fb8f10 942 struct packet_sock *po, unsigned int status)
943{
bc59ba39 944 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
f6fb8f10 945
946 /* retire/close the current block */
947 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
948 /*
949 * Plug the case where copy_bits() is in progress on
950 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
951 * have space to copy the pkt in the current block and
952 * called prb_retire_current_block()
953 *
954 * We don't need to worry about the TMO case because
955 * the timer-handler already handled this case.
956 */
957 if (!(status & TP_STATUS_BLK_TMO)) {
958 while (atomic_read(&pkc->blk_fill_in_prog)) {
959 /* Waiting for skb_copy_bits to finish... */
960 cpu_relax();
961 }
962 }
963 prb_close_block(pkc, pbd, po, status);
964 return;
965 }
f6fb8f10 966}
967
eea49cc9 968static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
bc59ba39 969 struct tpacket_block_desc *pbd)
f6fb8f10 970{
971 return TP_STATUS_USER & BLOCK_STATUS(pbd);
972}
973
eea49cc9 974static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
f6fb8f10 975{
976 return pkc->reset_pending_on_curr_blk;
977}
978
eea49cc9 979static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
f6fb8f10 980{
bc59ba39 981 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
f6fb8f10 982 atomic_dec(&pkc->blk_fill_in_prog);
983}
984
eea49cc9 985static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
f6fb8f10 986 struct tpacket3_hdr *ppd)
987{
3958afa1 988 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
f6fb8f10 989}
990
eea49cc9 991static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
f6fb8f10 992 struct tpacket3_hdr *ppd)
993{
994 ppd->hv1.tp_rxhash = 0;
995}
996
eea49cc9 997static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
f6fb8f10 998 struct tpacket3_hdr *ppd)
999{
df8a39de
JP
1000 if (skb_vlan_tag_present(pkc->skb)) {
1001 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
a0cdfcf3
AW
1002 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1003 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
f6fb8f10 1004 } else {
9e67030a 1005 ppd->hv1.tp_vlan_tci = 0;
a0cdfcf3 1006 ppd->hv1.tp_vlan_tpid = 0;
9e67030a 1007 ppd->tp_status = TP_STATUS_AVAILABLE;
f6fb8f10 1008 }
1009}
1010
bc59ba39 1011static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
f6fb8f10 1012 struct tpacket3_hdr *ppd)
1013{
a0cdfcf3 1014 ppd->hv1.tp_padding = 0;
f6fb8f10 1015 prb_fill_vlan_info(pkc, ppd);
1016
1017 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1018 prb_fill_rxhash(pkc, ppd);
1019 else
1020 prb_clear_rxhash(pkc, ppd);
1021}
1022
eea49cc9 1023static void prb_fill_curr_block(char *curr,
bc59ba39 1024 struct tpacket_kbdq_core *pkc,
1025 struct tpacket_block_desc *pbd,
f6fb8f10 1026 unsigned int len)
1027{
1028 struct tpacket3_hdr *ppd;
1029
1030 ppd = (struct tpacket3_hdr *)curr;
1031 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1032 pkc->prev = curr;
1033 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1034 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1035 BLOCK_NUM_PKTS(pbd) += 1;
1036 atomic_inc(&pkc->blk_fill_in_prog);
1037 prb_run_all_ft_ops(pkc, ppd);
1038}
1039
1040/* Assumes caller has the sk->rx_queue.lock */
1041static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1042 struct sk_buff *skb,
1043 int status,
1044 unsigned int len
1045 )
1046{
bc59ba39 1047 struct tpacket_kbdq_core *pkc;
1048 struct tpacket_block_desc *pbd;
f6fb8f10 1049 char *curr, *end;
1050
e3192690 1051 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
f6fb8f10 1052 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1053
1054 /* Queue is frozen when user space is lagging behind */
1055 if (prb_queue_frozen(pkc)) {
1056 /*
 1057 * Check if the last block, which caused the queue to freeze,
 1058 * is still in use by user-space.
1059 */
1060 if (prb_curr_blk_in_use(pkc, pbd)) {
1061 /* Can't record this packet */
1062 return NULL;
1063 } else {
1064 /*
1065 * Ok, the block was released by user-space.
1066 * Now let's open that block.
1067 * opening a block also thaws the queue.
1068 * Thawing is a side effect.
1069 */
1070 prb_open_block(pkc, pbd);
1071 }
1072 }
1073
1074 smp_mb();
1075 curr = pkc->nxt_offset;
1076 pkc->skb = skb;
e3192690 1077 end = (char *)pbd + pkc->kblk_size;
f6fb8f10 1078
1079 /* first try the current block */
1080 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1081 prb_fill_curr_block(curr, pkc, pbd, len);
1082 return (void *)curr;
1083 }
1084
1085 /* Ok, close the current block */
1086 prb_retire_current_block(pkc, po, 0);
1087
1088 /* Now, try to dispatch the next block */
1089 curr = (char *)prb_dispatch_next_block(pkc, po);
1090 if (curr) {
1091 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1092 prb_fill_curr_block(curr, pkc, pbd, len);
1093 return (void *)curr;
1094 }
1095
1096 /*
 1097 * No free blocks are available. User-space hasn't caught up yet.
1098 * Queue was just frozen and now this packet will get dropped.
1099 */
1100 return NULL;
1101}
1102
eea49cc9 1103static void *packet_current_rx_frame(struct packet_sock *po,
f6fb8f10 1104 struct sk_buff *skb,
1105 int status, unsigned int len)
1106{
1107 char *curr = NULL;
1108 switch (po->tp_version) {
1109 case TPACKET_V1:
1110 case TPACKET_V2:
1111 curr = packet_lookup_frame(po, &po->rx_ring,
1112 po->rx_ring.head, status);
1113 return curr;
1114 case TPACKET_V3:
1115 return __packet_lookup_frame_in_block(po, skb, status, len);
1116 default:
1117 WARN(1, "TPACKET version not supported\n");
1118 BUG();
99aa3473 1119 return NULL;
f6fb8f10 1120 }
1121}
1122
eea49cc9 1123static void *prb_lookup_block(struct packet_sock *po,
f6fb8f10 1124 struct packet_ring_buffer *rb,
77f65ebd 1125 unsigned int idx,
f6fb8f10 1126 int status)
1127{
bc59ba39 1128 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
77f65ebd 1129 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
f6fb8f10 1130
1131 if (status != BLOCK_STATUS(pbd))
1132 return NULL;
1133 return pbd;
1134}
1135
eea49cc9 1136static int prb_previous_blk_num(struct packet_ring_buffer *rb)
f6fb8f10 1137{
1138 unsigned int prev;
1139 if (rb->prb_bdqc.kactive_blk_num)
1140 prev = rb->prb_bdqc.kactive_blk_num-1;
1141 else
1142 prev = rb->prb_bdqc.knum_blocks-1;
1143 return prev;
1144}
1145
1146/* Assumes caller has held the rx_queue.lock */
eea49cc9 1147static void *__prb_previous_block(struct packet_sock *po,
f6fb8f10 1148 struct packet_ring_buffer *rb,
1149 int status)
1150{
1151 unsigned int previous = prb_previous_blk_num(rb);
1152 return prb_lookup_block(po, rb, previous, status);
1153}
1154
eea49cc9 1155static void *packet_previous_rx_frame(struct packet_sock *po,
f6fb8f10 1156 struct packet_ring_buffer *rb,
1157 int status)
1158{
1159 if (po->tp_version <= TPACKET_V2)
1160 return packet_previous_frame(po, rb, status);
1161
1162 return __prb_previous_block(po, rb, status);
1163}
1164
eea49cc9 1165static void packet_increment_rx_head(struct packet_sock *po,
f6fb8f10 1166 struct packet_ring_buffer *rb)
1167{
1168 switch (po->tp_version) {
1169 case TPACKET_V1:
1170 case TPACKET_V2:
1171 return packet_increment_head(rb);
1172 case TPACKET_V3:
1173 default:
1174 WARN(1, "TPACKET version not supported.\n");
1175 BUG();
1176 return;
1177 }
1178}
1179
eea49cc9 1180static void *packet_previous_frame(struct packet_sock *po,
69e3c75f
JB
1181 struct packet_ring_buffer *rb,
1182 int status)
1183{
1184 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1185 return packet_lookup_frame(po, rb, previous, status);
1186}
1187
eea49cc9 1188static void packet_increment_head(struct packet_ring_buffer *buff)
69e3c75f
JB
1189{
1190 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1191}
1192
b0138408
DB
1193static void packet_inc_pending(struct packet_ring_buffer *rb)
1194{
1195 this_cpu_inc(*rb->pending_refcnt);
1196}
1197
1198static void packet_dec_pending(struct packet_ring_buffer *rb)
1199{
1200 this_cpu_dec(*rb->pending_refcnt);
1201}
1202
1203static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1204{
1205 unsigned int refcnt = 0;
1206 int cpu;
1207
1208 /* We don't use pending refcount in rx_ring. */
1209 if (rb->pending_refcnt == NULL)
1210 return 0;
1211
1212 for_each_possible_cpu(cpu)
1213 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1214
1215 return refcnt;
1216}
1217
1218static int packet_alloc_pending(struct packet_sock *po)
1219{
1220 po->rx_ring.pending_refcnt = NULL;
1221
1222 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1223 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1224 return -ENOBUFS;
1225
1226 return 0;
1227}
1228
1229static void packet_free_pending(struct packet_sock *po)
1230{
1231 free_percpu(po->tx_ring.pending_refcnt);
1232}
1233
9954729b
WB
1234#define ROOM_POW_OFF 2
1235#define ROOM_NONE 0x0
1236#define ROOM_LOW 0x1
1237#define ROOM_NORMAL 0x2
1238
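/* Meaning of the levels below: ROOM_NORMAL means at least ~1/(1 << ROOM_POW_OFF),
 * i.e. a quarter, of the ring or receive buffer is still free; ROOM_LOW means
 * there is space but less than that threshold; ROOM_NONE means nothing more
 * can be queued.  The fanout rollover code uses these levels to decide when
 * to spill traffic to another socket in the group.
 */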
1239static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
77f65ebd 1240{
9954729b
WB
1241 int idx, len;
1242
1243 len = po->rx_ring.frame_max + 1;
1244 idx = po->rx_ring.head;
1245 if (pow_off)
1246 idx += len >> pow_off;
1247 if (idx >= len)
1248 idx -= len;
1249 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1250}
1251
1252static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1253{
1254 int idx, len;
1255
1256 len = po->rx_ring.prb_bdqc.knum_blocks;
1257 idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1258 if (pow_off)
1259 idx += len >> pow_off;
1260 if (idx >= len)
1261 idx -= len;
1262 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1263}
77f65ebd 1264
2ccdbaa6 1265static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
9954729b
WB
1266{
1267 struct sock *sk = &po->sk;
1268 int ret = ROOM_NONE;
1269
1270 if (po->prot_hook.func != tpacket_rcv) {
1271 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
2ccdbaa6 1272 - (skb ? skb->truesize : 0);
9954729b
WB
1273 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1274 return ROOM_NORMAL;
1275 else if (avail > 0)
1276 return ROOM_LOW;
1277 else
1278 return ROOM_NONE;
1279 }
77f65ebd 1280
9954729b
WB
1281 if (po->tp_version == TPACKET_V3) {
1282 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1283 ret = ROOM_NORMAL;
1284 else if (__tpacket_v3_has_room(po, 0))
1285 ret = ROOM_LOW;
1286 } else {
1287 if (__tpacket_has_room(po, ROOM_POW_OFF))
1288 ret = ROOM_NORMAL;
1289 else if (__tpacket_has_room(po, 0))
1290 ret = ROOM_LOW;
1291 }
2ccdbaa6
WB
1292
1293 return ret;
1294}
1295
1296static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1297{
1298 int ret;
1299 bool has_room;
1300
54d7c01d
WB
1301 spin_lock_bh(&po->sk.sk_receive_queue.lock);
1302 ret = __packet_rcv_has_room(po, skb);
2ccdbaa6
WB
1303 has_room = ret == ROOM_NORMAL;
1304 if (po->pressure == has_room)
54d7c01d
WB
1305 po->pressure = !has_room;
1306 spin_unlock_bh(&po->sk.sk_receive_queue.lock);
77f65ebd 1307
9954729b 1308 return ret;
77f65ebd
WB
1309}
1310
1da177e4
LT
1311static void packet_sock_destruct(struct sock *sk)
1312{
ed85b565
RC
1313 skb_queue_purge(&sk->sk_error_queue);
1314
547b792c
IJ
1315 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1316 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1da177e4
LT
1317
1318 if (!sock_flag(sk, SOCK_DEAD)) {
40d4e3df 1319 pr_err("Attempt to release alive packet socket: %p\n", sk);
1da177e4
LT
1320 return;
1321 }
1322
17ab56a2 1323 sk_refcnt_debug_dec(sk);
1da177e4
LT
1324}
1325
3b3a5b0a
WB
1326static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1327{
1328 u32 rxhash;
1329 int i, count = 0;
1330
1331 rxhash = skb_get_hash(skb);
1332 for (i = 0; i < ROLLOVER_HLEN; i++)
1333 if (po->rollover->history[i] == rxhash)
1334 count++;
1335
1336 po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1337 return count > (ROLLOVER_HLEN >> 1);
1338}
1339
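/* fanout_flow_is_huge() above keeps the last ROLLOVER_HLEN rxhashes seen; a
 * packet matching more than half of them is treated as part of a "huge" flow.
 * fanout_demux_rollover() uses this so that, under mild pressure (ROOM_LOW),
 * small flows stay on their own socket while huge flows are rolled over early.
 */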
77f65ebd
WB
1340static unsigned int fanout_demux_hash(struct packet_fanout *f,
1341 struct sk_buff *skb,
1342 unsigned int num)
dc99f600 1343{
eb70db87 1344 return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
dc99f600
DM
1345}
1346
77f65ebd
WB
1347static unsigned int fanout_demux_lb(struct packet_fanout *f,
1348 struct sk_buff *skb,
1349 unsigned int num)
dc99f600 1350{
468479e6 1351 unsigned int val = atomic_inc_return(&f->rr_cur);
dc99f600 1352
468479e6 1353 return val % num;
77f65ebd
WB
1354}
1355
1356static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1357 struct sk_buff *skb,
1358 unsigned int num)
1359{
1360 return smp_processor_id() % num;
dc99f600
DM
1361}
1362
5df0ddfb
DB
1363static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1364 struct sk_buff *skb,
1365 unsigned int num)
1366{
f337db64 1367 return prandom_u32_max(num);
5df0ddfb
DB
1368}
1369
77f65ebd
WB
1370static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1371 struct sk_buff *skb,
ad377cab 1372 unsigned int idx, bool try_self,
77f65ebd 1373 unsigned int num)
95ec3eb4 1374{
4633c9e0 1375 struct packet_sock *po, *po_next, *po_skip = NULL;
a9b63918 1376 unsigned int i, j, room = ROOM_NONE;
95ec3eb4 1377
0648ab70 1378 po = pkt_sk(f->arr[idx]);
3b3a5b0a
WB
1379
1380 if (try_self) {
1381 room = packet_rcv_has_room(po, skb);
1382 if (room == ROOM_NORMAL ||
1383 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1384 return idx;
4633c9e0 1385 po_skip = po;
3b3a5b0a 1386 }
ad377cab 1387
0648ab70 1388 i = j = min_t(int, po->rollover->sock, num - 1);
77f65ebd 1389 do {
2ccdbaa6 1390 po_next = pkt_sk(f->arr[i]);
4633c9e0 1391 if (po_next != po_skip && !po_next->pressure &&
2ccdbaa6 1392 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
77f65ebd 1393 if (i != j)
0648ab70 1394 po->rollover->sock = i;
a9b63918
WB
1395 atomic_long_inc(&po->rollover->num);
1396 if (room == ROOM_LOW)
1397 atomic_long_inc(&po->rollover->num_huge);
77f65ebd
WB
1398 return i;
1399 }
ad377cab 1400
77f65ebd
WB
1401 if (++i == num)
1402 i = 0;
1403 } while (i != j);
1404
a9b63918 1405 atomic_long_inc(&po->rollover->num_failed);
77f65ebd
WB
1406 return idx;
1407}
1408
2d36097d
NH
1409static unsigned int fanout_demux_qm(struct packet_fanout *f,
1410 struct sk_buff *skb,
1411 unsigned int num)
1412{
1413 return skb_get_queue_mapping(skb) % num;
1414}
1415
47dceb8e
WB
1416static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1417 struct sk_buff *skb,
1418 unsigned int num)
1419{
1420 struct bpf_prog *prog;
1421 unsigned int ret = 0;
1422
1423 rcu_read_lock();
1424 prog = rcu_dereference(f->bpf_prog);
1425 if (prog)
ff936a04 1426 ret = bpf_prog_run_clear_cb(prog, skb) % num;
47dceb8e
WB
1427 rcu_read_unlock();
1428
1429 return ret;
1430}
1431
77f65ebd
WB
1432static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1433{
1434 return f->flags & (flag >> 8);
95ec3eb4
DM
1435}
1436
95ec3eb4
DM
1437static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1438 struct packet_type *pt, struct net_device *orig_dev)
dc99f600
DM
1439{
1440 struct packet_fanout *f = pt->af_packet_priv;
f98f4514 1441 unsigned int num = READ_ONCE(f->num_members);
19bcf9f2 1442 struct net *net = read_pnet(&f->net);
dc99f600 1443 struct packet_sock *po;
77f65ebd 1444 unsigned int idx;
dc99f600 1445
19bcf9f2 1446 if (!net_eq(dev_net(dev), net) || !num) {
dc99f600
DM
1447 kfree_skb(skb);
1448 return 0;
1449 }
1450
3f34b24a 1451 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
19bcf9f2 1452 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
3f34b24a
AD
1453 if (!skb)
1454 return 0;
1455 }
95ec3eb4
DM
1456 switch (f->type) {
1457 case PACKET_FANOUT_HASH:
1458 default:
77f65ebd 1459 idx = fanout_demux_hash(f, skb, num);
95ec3eb4
DM
1460 break;
1461 case PACKET_FANOUT_LB:
77f65ebd 1462 idx = fanout_demux_lb(f, skb, num);
95ec3eb4
DM
1463 break;
1464 case PACKET_FANOUT_CPU:
77f65ebd
WB
1465 idx = fanout_demux_cpu(f, skb, num);
1466 break;
5df0ddfb
DB
1467 case PACKET_FANOUT_RND:
1468 idx = fanout_demux_rnd(f, skb, num);
1469 break;
2d36097d
NH
1470 case PACKET_FANOUT_QM:
1471 idx = fanout_demux_qm(f, skb, num);
1472 break;
77f65ebd 1473 case PACKET_FANOUT_ROLLOVER:
ad377cab 1474 idx = fanout_demux_rollover(f, skb, 0, false, num);
95ec3eb4 1475 break;
47dceb8e 1476 case PACKET_FANOUT_CBPF:
f2e52095 1477 case PACKET_FANOUT_EBPF:
47dceb8e
WB
1478 idx = fanout_demux_bpf(f, skb, num);
1479 break;
dc99f600
DM
1480 }
1481
ad377cab
WB
1482 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1483 idx = fanout_demux_rollover(f, skb, idx, true, num);
dc99f600 1484
ad377cab 1485 po = pkt_sk(f->arr[idx]);
dc99f600
DM
1486 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1487}
1488
fff3321d
PE
1489DEFINE_MUTEX(fanout_mutex);
1490EXPORT_SYMBOL_GPL(fanout_mutex);
dc99f600
DM
1491static LIST_HEAD(fanout_list);
1492
1493static void __fanout_link(struct sock *sk, struct packet_sock *po)
1494{
1495 struct packet_fanout *f = po->fanout;
1496
1497 spin_lock(&f->lock);
1498 f->arr[f->num_members] = sk;
1499 smp_wmb();
1500 f->num_members++;
1501 spin_unlock(&f->lock);
1502}
1503
1504static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1505{
1506 struct packet_fanout *f = po->fanout;
1507 int i;
1508
1509 spin_lock(&f->lock);
1510 for (i = 0; i < f->num_members; i++) {
1511 if (f->arr[i] == sk)
1512 break;
1513 }
1514 BUG_ON(i >= f->num_members);
1515 f->arr[i] = f->arr[f->num_members - 1];
1516 f->num_members--;
1517 spin_unlock(&f->lock);
1518}
1519
d4dd8aee 1520static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
c0de08d0 1521{
161642e2
ED
1522 if (sk->sk_family != PF_PACKET)
1523 return false;
c0de08d0 1524
161642e2 1525 return ptype->af_packet_priv == pkt_sk(sk)->fanout;
c0de08d0
EL
1526}
1527
47dceb8e
WB
1528static void fanout_init_data(struct packet_fanout *f)
1529{
1530 switch (f->type) {
1531 case PACKET_FANOUT_LB:
1532 atomic_set(&f->rr_cur, 0);
1533 break;
1534 case PACKET_FANOUT_CBPF:
f2e52095 1535 case PACKET_FANOUT_EBPF:
47dceb8e
WB
1536 RCU_INIT_POINTER(f->bpf_prog, NULL);
1537 break;
1538 }
1539}
1540
1541static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1542{
1543 struct bpf_prog *old;
1544
1545 spin_lock(&f->lock);
1546 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1547 rcu_assign_pointer(f->bpf_prog, new);
1548 spin_unlock(&f->lock);
1549
1550 if (old) {
1551 synchronize_net();
1552 bpf_prog_destroy(old);
1553 }
1554}
1555
1556static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1557 unsigned int len)
1558{
1559 struct bpf_prog *new;
1560 struct sock_fprog fprog;
1561 int ret;
1562
1563 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1564 return -EPERM;
1565 if (len != sizeof(fprog))
1566 return -EINVAL;
1567 if (copy_from_user(&fprog, data, len))
1568 return -EFAULT;
1569
bab18991 1570 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
47dceb8e
WB
1571 if (ret)
1572 return ret;
1573
1574 __fanout_set_data_bpf(po->fanout, new);
1575 return 0;
1576}
1577
f2e52095
WB
1578static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1579 unsigned int len)
1580{
1581 struct bpf_prog *new;
1582 u32 fd;
1583
1584 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1585 return -EPERM;
1586 if (len != sizeof(fd))
1587 return -EINVAL;
1588 if (copy_from_user(&fd, data, len))
1589 return -EFAULT;
1590
113214be 1591 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
f2e52095
WB
1592 if (IS_ERR(new))
1593 return PTR_ERR(new);
f2e52095
WB
1594
1595 __fanout_set_data_bpf(po->fanout, new);
1596 return 0;
1597}
1598
47dceb8e
WB
1599static int fanout_set_data(struct packet_sock *po, char __user *data,
1600 unsigned int len)
1601{
1602 switch (po->fanout->type) {
1603 case PACKET_FANOUT_CBPF:
1604 return fanout_set_data_cbpf(po, data, len);
f2e52095
WB
1605 case PACKET_FANOUT_EBPF:
1606 return fanout_set_data_ebpf(po, data, len);
47dceb8e
WB
1607 default:
1608 return -EINVAL;
 1609 }
1610}
1611
1612static void fanout_release_data(struct packet_fanout *f)
1613{
1614 switch (f->type) {
1615 case PACKET_FANOUT_CBPF:
f2e52095 1616 case PACKET_FANOUT_EBPF:
47dceb8e
WB
1617 __fanout_set_data_bpf(f, NULL);
 1618 }
1619}
1620
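/* For reference, joining a fanout group from user space looks roughly like
 * this (illustrative sketch only, error handling omitted):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int arg = (fanout_group_id & 0xffff) | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * packet_setsockopt() then calls fanout_add() below with id = arg & 0xffff
 * and type_flags = arg >> 16, where the low byte of type_flags is the mode
 * and the high byte carries the PACKET_FANOUT_FLAG_* bits.
 */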
7736d33f 1621static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
dc99f600
DM
1622{
1623 struct packet_sock *po = pkt_sk(sk);
1624 struct packet_fanout *f, *match;
7736d33f 1625 u8 type = type_flags & 0xff;
77f65ebd 1626 u8 flags = type_flags >> 8;
dc99f600
DM
1627 int err;
1628
1629 switch (type) {
77f65ebd
WB
1630 case PACKET_FANOUT_ROLLOVER:
1631 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1632 return -EINVAL;
dc99f600
DM
1633 case PACKET_FANOUT_HASH:
1634 case PACKET_FANOUT_LB:
95ec3eb4 1635 case PACKET_FANOUT_CPU:
5df0ddfb 1636 case PACKET_FANOUT_RND:
2d36097d 1637 case PACKET_FANOUT_QM:
47dceb8e 1638 case PACKET_FANOUT_CBPF:
f2e52095 1639 case PACKET_FANOUT_EBPF:
dc99f600
DM
1640 break;
1641 default:
1642 return -EINVAL;
1643 }
1644
1645 if (!po->running)
1646 return -EINVAL;
1647
1648 if (po->fanout)
1649 return -EALREADY;
1650
4633c9e0
WB
1651 if (type == PACKET_FANOUT_ROLLOVER ||
1652 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
0648ab70
WB
1653 po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
1654 if (!po->rollover)
1655 return -ENOMEM;
a9b63918
WB
1656 atomic_long_set(&po->rollover->num, 0);
1657 atomic_long_set(&po->rollover->num_huge, 0);
1658 atomic_long_set(&po->rollover->num_failed, 0);
0648ab70
WB
1659 }
1660
dc99f600
DM
1661 mutex_lock(&fanout_mutex);
1662 match = NULL;
1663 list_for_each_entry(f, &fanout_list, list) {
1664 if (f->id == id &&
1665 read_pnet(&f->net) == sock_net(sk)) {
1666 match = f;
1667 break;
1668 }
1669 }
afe62c68 1670 err = -EINVAL;
77f65ebd 1671 if (match && match->flags != flags)
afe62c68 1672 goto out;
dc99f600 1673 if (!match) {
afe62c68 1674 err = -ENOMEM;
dc99f600 1675 match = kzalloc(sizeof(*match), GFP_KERNEL);
afe62c68
ED
1676 if (!match)
1677 goto out;
1678 write_pnet(&match->net, sock_net(sk));
1679 match->id = id;
1680 match->type = type;
77f65ebd 1681 match->flags = flags;
afe62c68
ED
1682 INIT_LIST_HEAD(&match->list);
1683 spin_lock_init(&match->lock);
1684 atomic_set(&match->sk_ref, 0);
47dceb8e 1685 fanout_init_data(match);
afe62c68
ED
1686 match->prot_hook.type = po->prot_hook.type;
1687 match->prot_hook.dev = po->prot_hook.dev;
1688 match->prot_hook.func = packet_rcv_fanout;
1689 match->prot_hook.af_packet_priv = match;
c0de08d0 1690 match->prot_hook.id_match = match_fanout_group;
afe62c68
ED
1691 dev_add_pack(&match->prot_hook);
1692 list_add(&match->list, &fanout_list);
dc99f600 1693 }
afe62c68
ED
1694 err = -EINVAL;
1695 if (match->type == type &&
1696 match->prot_hook.type == po->prot_hook.type &&
1697 match->prot_hook.dev == po->prot_hook.dev) {
1698 err = -ENOSPC;
1699 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1700 __dev_remove_pack(&po->prot_hook);
1701 po->fanout = match;
1702 atomic_inc(&match->sk_ref);
1703 __fanout_link(sk, po);
1704 err = 0;
dc99f600
DM
1705 }
1706 }
afe62c68 1707out:
dc99f600 1708 mutex_unlock(&fanout_mutex);
0648ab70
WB
1709 if (err) {
1710 kfree(po->rollover);
1711 po->rollover = NULL;
1712 }
dc99f600
DM
1713 return err;
1714}
1715
1716static void fanout_release(struct sock *sk)
1717{
1718 struct packet_sock *po = pkt_sk(sk);
1719 struct packet_fanout *f;
1720
1721 f = po->fanout;
1722 if (!f)
1723 return;
1724
fff3321d 1725 mutex_lock(&fanout_mutex);
dc99f600
DM
1726 po->fanout = NULL;
1727
dc99f600
DM
1728 if (atomic_dec_and_test(&f->sk_ref)) {
1729 list_del(&f->list);
1730 dev_remove_pack(&f->prot_hook);
47dceb8e 1731 fanout_release_data(f);
dc99f600
DM
1732 kfree(f);
1733 }
1734 mutex_unlock(&fanout_mutex);
0648ab70 1735
59f21118
WB
1736 if (po->rollover)
1737 kfree_rcu(po->rollover, rcu);
dc99f600 1738}
1da177e4 1739
3c70c132
DB
1740static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1741 struct sk_buff *skb)
1742{
1743 /* Earlier code assumed this would be a VLAN pkt, double-check
1744 * this now that we have the actual packet in hand. We can only
1745 * do this check on Ethernet devices.
1746 */
1747 if (unlikely(dev->type != ARPHRD_ETHER))
1748 return false;
1749
1750 skb_reset_mac_header(skb);
1751 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1752}
1753
90ddc4f0 1754static const struct proto_ops packet_ops;
1da177e4 1755
90ddc4f0 1756static const struct proto_ops packet_ops_spkt;
1da177e4 1757
40d4e3df
ED
1758static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1759 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
1760{
1761 struct sock *sk;
1762 struct sockaddr_pkt *spkt;
1763
1764 /*
1765 * When we registered the protocol we saved the socket in the data
1766 * field for just this event.
1767 */
1768
1769 sk = pt->af_packet_priv;
1ce4f28b 1770
1da177e4
LT
1771 /*
1772 * Yank back the headers [hope the device set this
1773 * right or kerboom...]
1774 *
1775 * Incoming packets have ll header pulled,
1776 * push it back.
1777 *
98e399f8 1778 * For outgoing ones skb->data == skb_mac_header(skb)
1da177e4
LT
 1779 * so that this procedure is a no-op.
1780 */
1781
1782 if (skb->pkt_type == PACKET_LOOPBACK)
1783 goto out;
1784
09ad9bc7 1785 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
1786 goto out;
1787
40d4e3df
ED
1788 skb = skb_share_check(skb, GFP_ATOMIC);
1789 if (skb == NULL)
1da177e4
LT
1790 goto oom;
1791
1792 /* drop any routing info */
adf30907 1793 skb_dst_drop(skb);
1da177e4 1794
84531c24
PO
1795 /* drop conntrack reference */
1796 nf_reset(skb);
1797
ffbc6111 1798 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1da177e4 1799
98e399f8 1800 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
1801
1802 /*
1803 * The SOCK_PACKET socket receives _all_ frames.
1804 */
1805
1806 spkt->spkt_family = dev->type;
1807 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1808 spkt->spkt_protocol = skb->protocol;
1809
1810 /*
1811 * Charge the memory to the socket. This is done specifically
 1812 * to prevent sockets from using up all the memory.
1813 */
1814
40d4e3df 1815 if (sock_queue_rcv_skb(sk, skb) == 0)
1da177e4
LT
1816 return 0;
1817
1818out:
1819 kfree_skb(skb);
1820oom:
1821 return 0;
1822}
1823
1824
1825/*
1826 * Output a raw packet to a device layer. This bypasses all the other
1827 * protocol layers and you must therefore supply it with a complete frame
1828 */
1ce4f28b 1829
1b784140
YX
1830static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1831 size_t len)
1832{
1833 struct sock *sk = sock->sk;
342dfc30 1834 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1a35ca80 1835 struct sk_buff *skb = NULL;
1da177e4 1836 struct net_device *dev;
c14ac945 1837 struct sockcm_cookie sockc;
40d4e3df 1838 __be16 proto = 0;
1da177e4 1839 int err;
3bdc0eba 1840 int extra_len = 0;
1ce4f28b 1841
1da177e4 1842 /*
1ce4f28b 1843 * Get and verify the address.
1844 */
1845
40d4e3df 1846 if (saddr) {
1da177e4 1847 if (msg->msg_namelen < sizeof(struct sockaddr))
1848 return -EINVAL;
1849 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1850 proto = saddr->spkt_protocol;
1851 } else
1852 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1853
1854 /*
1ce4f28b 1855 * Find the device first to size check it
1856 */
1857
de74e92a 1858 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1a35ca80 1859retry:
1860 rcu_read_lock();
1861 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1862 err = -ENODEV;
1863 if (dev == NULL)
1864 goto out_unlock;
1ce4f28b 1865
1866 err = -ENETDOWN;
1867 if (!(dev->flags & IFF_UP))
1868 goto out_unlock;
1869
1da177e4 1870 /*
1871 * You may not queue a frame bigger than the mtu. This is the lowest level
1872 * raw protocol and you must do your own fragmentation at this level.
1da177e4 1873 */
1ce4f28b 1874
1875 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1876 if (!netif_supports_nofcs(dev)) {
1877 err = -EPROTONOSUPPORT;
1878 goto out_unlock;
1879 }
1880 extra_len = 4; /* We're doing our own CRC */
1881 }
1882
1da177e4 1883 err = -EMSGSIZE;
3bdc0eba 1884 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1885 goto out_unlock;
1886
1887 if (!skb) {
1888 size_t reserved = LL_RESERVED_SPACE(dev);
4ce40912 1889 int tlen = dev->needed_tailroom;
1890 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1891
1892 rcu_read_unlock();
4ce40912 1893 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1894 if (skb == NULL)
1895 return -ENOBUFS;
1896 /* FIXME: Save some space for broken drivers that write a hard
1897 * header at transmission time by themselves. PPP is the notable
1898 * one here. This should really be fixed at the driver level.
1899 */
1900 skb_reserve(skb, reserved);
1901 skb_reset_network_header(skb);
1902
1903 /* Try to align data part correctly */
1904 if (hhlen) {
1905 skb->data -= hhlen;
1906 skb->tail -= hhlen;
1907 if (len < hhlen)
1908 skb_reset_network_header(skb);
1909 }
6ce8e9ce 1910 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1911 if (err)
1912 goto out_free;
1913 goto retry;
1914 }
1915
1916 if (!dev_validate_header(dev, skb->data, len)) {
1917 err = -EINVAL;
1918 goto out_unlock;
1919 }
1920 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1921 !packet_extra_vlan_len_allowed(dev, skb)) {
1922 err = -EMSGSIZE;
1923 goto out_unlock;
57f89bfa 1924 }
1a35ca80 1925
edbe7746 1926 sockc.tsflags = sk->sk_tsflags;
1927 if (msg->msg_controllen) {
1928 err = sock_cmsg_send(sk, msg, &sockc);
f8e7718c 1929 if (unlikely(err))
c14ac945 1930 goto out_unlock;
1931 }
1932
1933 skb->protocol = proto;
1934 skb->dev = dev;
1935 skb->priority = sk->sk_priority;
2d37a186 1936 skb->mark = sk->sk_mark;
bf84a010 1937
c14ac945 1938 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
1da177e4 1939
1940 if (unlikely(extra_len == 4))
1941 skb->no_fcs = 1;
1942
40893fd0 1943 skb_probe_transport_header(skb, 0);
c1aad275 1944
1da177e4 1945 dev_queue_xmit(skb);
654d1f8a 1946 rcu_read_unlock();
40d4e3df 1947 return len;
1da177e4 1948
1da177e4 1949out_unlock:
654d1f8a 1950 rcu_read_unlock();
1951out_free:
1952 kfree_skb(skb);
1953 return err;
1954}
1da177e4 1955
1956static unsigned int run_filter(struct sk_buff *skb,
1957			       const struct sock *sk,
1958			       unsigned int res)
1959{
1960	struct sk_filter *filter;
1961
1962	rcu_read_lock();
1963	filter = rcu_dereference(sk->sk_filter);
1964	if (filter != NULL)
1965		res = bpf_prog_run_clear_cb(filter->prog, skb);
1966	rcu_read_unlock();
1967
1968	return res;
1969}
1970
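/*
 * PACKET_VNET_HDR receive helpers: __packet_rcv_vnet() fills a
 * virtio_net_hdr from the skb's checksum/GSO state, and packet_rcv_vnet()
 * copies that header to the user's msghdr ahead of the packet data,
 * shrinking *len accordingly.
 */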
1971static int __packet_rcv_vnet(const struct sk_buff *skb,
1972 struct virtio_net_hdr *vnet_hdr)
1973{
1974 *vnet_hdr = (const struct virtio_net_hdr) { 0 };
1975
1976 if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le()))
1977 BUG();
1978
1979 return 0;
1980}
1981
1982static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
1983 size_t *len)
1984{
1985 struct virtio_net_hdr vnet_hdr;
1986
1987 if (*len < sizeof(vnet_hdr))
1988 return -EINVAL;
1989 *len -= sizeof(vnet_hdr);
1990
1991 if (__packet_rcv_vnet(skb, &vnet_hdr))
1992 return -EINVAL;
1993
1994 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
1995}
1996
1997/*
1998 * This function clones the skb lazily, in the hope that most packets
1999 * are discarded by BPF.
2000 *
2001 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2002 * and skb->cb are mangled. It works because (and until) packets
2003 * falling here are owned by the current CPU. Output packets are cloned
2004 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2005 * sequentially, so if we return the skb to its original state on exit,
2006 * we will not harm anyone.
1da177e4
LT
2007 */
2008
40d4e3df
ED
2009static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2010 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
2011{
2012 struct sock *sk;
2013 struct sockaddr_ll *sll;
2014 struct packet_sock *po;
40d4e3df 2015 u8 *skb_head = skb->data;
1da177e4 2016 int skb_len = skb->len;
dbcb5855 2017 unsigned int snaplen, res;
da37845f 2018 bool is_drop_n_account = false;
1da177e4
LT
2019
2020 if (skb->pkt_type == PACKET_LOOPBACK)
2021 goto drop;
2022
2023 sk = pt->af_packet_priv;
2024 po = pkt_sk(sk);
2025
09ad9bc7 2026 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
2027 goto drop;
2028
1da177e4
LT
2029 skb->dev = dev;
2030
3b04ddde 2031 if (dev->header_ops) {
1da177e4 2032 /* The device has an explicit notion of ll header,
62ab0812
ED
2033 * exported to higher levels.
2034 *
2035 * Otherwise, the device hides details of its frame
2036 * structure, so that corresponding packet head is
2037 * never delivered to user.
1da177e4
LT
2038 */
2039 if (sk->sk_type != SOCK_DGRAM)
98e399f8 2040 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
2041 else if (skb->pkt_type == PACKET_OUTGOING) {
2042 /* Special case: outgoing packets have ll header at head */
bbe735e4 2043 skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
2044 }
2045 }
2046
2047 snaplen = skb->len;
2048
dbcb5855
DM
2049 res = run_filter(skb, sk, snaplen);
2050 if (!res)
fda9ef5d 2051 goto drop_n_restore;
dbcb5855
DM
2052 if (snaplen > res)
2053 snaplen = res;
1da177e4 2054
0fd7bac6 2055 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1da177e4
LT
2056 goto drop_n_acct;
2057
2058 if (skb_shared(skb)) {
2059 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2060 if (nskb == NULL)
2061 goto drop_n_acct;
2062
2063 if (skb_head != skb->data) {
2064 skb->data = skb_head;
2065 skb->len = skb_len;
2066 }
abc4e4fa 2067 consume_skb(skb);
1da177e4
LT
2068 skb = nskb;
2069 }
2070
b4772ef8 2071 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
ffbc6111
HX
2072
2073 sll = &PACKET_SKB_CB(skb)->sa.ll;
1da177e4 2074 sll->sll_hatype = dev->type;
1da177e4 2075 sll->sll_pkttype = skb->pkt_type;
8032b464 2076 if (unlikely(po->origdev))
80feaacb
PWJ
2077 sll->sll_ifindex = orig_dev->ifindex;
2078 else
2079 sll->sll_ifindex = dev->ifindex;
1da177e4 2080
b95cce35 2081 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1da177e4 2082
2472d761
EB
2083 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2084 * Use their space for storing the original skb length.
2085 */
2086 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
8dc41944 2087
1da177e4
LT
2088 if (pskb_trim(skb, snaplen))
2089 goto drop_n_acct;
2090
2091 skb_set_owner_r(skb, sk);
2092 skb->dev = NULL;
adf30907 2093 skb_dst_drop(skb);
1da177e4 2094
84531c24
PO
2095 /* drop conntrack reference */
2096 nf_reset(skb);
2097
1da177e4 2098 spin_lock(&sk->sk_receive_queue.lock);
ee80fbf3 2099 po->stats.stats1.tp_packets++;
3bc3b96f 2100 sock_skb_set_dropcount(sk, skb);
1da177e4
LT
2101 __skb_queue_tail(&sk->sk_receive_queue, skb);
2102 spin_unlock(&sk->sk_receive_queue.lock);
676d2369 2103 sk->sk_data_ready(sk);
1da177e4
LT
2104 return 0;
2105
2106drop_n_acct:
da37845f 2107 is_drop_n_account = true;
7091fbd8 2108 spin_lock(&sk->sk_receive_queue.lock);
ee80fbf3 2109 po->stats.stats1.tp_drops++;
7091fbd8
WB
2110 atomic_inc(&sk->sk_drops);
2111 spin_unlock(&sk->sk_receive_queue.lock);
1da177e4
LT
2112
2113drop_n_restore:
2114 if (skb_head != skb->data && skb_shared(skb)) {
2115 skb->data = skb_head;
2116 skb->len = skb_len;
2117 }
2118drop:
da37845f
WJ
2119 if (!is_drop_n_account)
2120 consume_skb(skb);
2121 else
2122 kfree_skb(skb);
1da177e4
LT
2123 return 0;
2124}
2125
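/*
 * tpacket_rcv() is the mmap ring variant of packet_rcv(): instead of
 * queueing the skb, it copies the frame into the next free slot of the
 * PACKET_RX_RING and fills in the TPACKET_V1/V2/V3 frame header
 * (lengths, offsets, timestamp, VLAN tag) before the slot is handed
 * back to user space with TP_STATUS_USER set.
 */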
2126static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2127 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
2128{
2129 struct sock *sk;
2130 struct packet_sock *po;
2131 struct sockaddr_ll *sll;
184f489e 2132 union tpacket_uhdr h;
40d4e3df 2133 u8 *skb_head = skb->data;
1da177e4 2134 int skb_len = skb->len;
dbcb5855 2135 unsigned int snaplen, res;
f6fb8f10 2136 unsigned long status = TP_STATUS_USER;
bbd6ef87 2137 unsigned short macoff, netoff, hdrlen;
1da177e4 2138 struct sk_buff *copy_skb = NULL;
bbd6ef87 2139 struct timespec ts;
b9c32fb2 2140 __u32 ts_status;
da37845f 2141 bool is_drop_n_account = false;
1da177e4 2142
51846355
AW
2143 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2144 * We may add members to them until current aligned size without forcing
2145 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2146 */
2147 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2148 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2149
1da177e4
LT
2150 if (skb->pkt_type == PACKET_LOOPBACK)
2151 goto drop;
2152
2153 sk = pt->af_packet_priv;
2154 po = pkt_sk(sk);
2155
09ad9bc7 2156 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
2157 goto drop;
2158
3b04ddde 2159 if (dev->header_ops) {
1da177e4 2160 if (sk->sk_type != SOCK_DGRAM)
98e399f8 2161 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
2162 else if (skb->pkt_type == PACKET_OUTGOING) {
2163 /* Special case: outgoing packets have ll header at head */
bbe735e4 2164 skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
2165 }
2166 }
2167
2168 snaplen = skb->len;
2169
dbcb5855
DM
2170 res = run_filter(skb, sk, snaplen);
2171 if (!res)
fda9ef5d 2172 goto drop_n_restore;
68c2e5de
AD
2173
2174 if (skb->ip_summed == CHECKSUM_PARTIAL)
2175 status |= TP_STATUS_CSUMNOTREADY;
682f048b
AD
2176 else if (skb->pkt_type != PACKET_OUTGOING &&
2177 (skb->ip_summed == CHECKSUM_COMPLETE ||
2178 skb_csum_unnecessary(skb)))
2179 status |= TP_STATUS_CSUM_VALID;
68c2e5de 2180
dbcb5855
DM
2181 if (snaplen > res)
2182 snaplen = res;
1da177e4
LT
2183
2184 if (sk->sk_type == SOCK_DGRAM) {
8913336a
PM
2185 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2186 po->tp_reserve;
1da177e4 2187 } else {
95c96174 2188 unsigned int maclen = skb_network_offset(skb);
bbd6ef87 2189 netoff = TPACKET_ALIGN(po->tp_hdrlen +
8913336a 2190 (maclen < 16 ? 16 : maclen)) +
58d19b19
WB
2191 po->tp_reserve;
2192 if (po->has_vnet_hdr)
2193 netoff += sizeof(struct virtio_net_hdr);
1da177e4
LT
2194 macoff = netoff - maclen;
2195 }
f6fb8f10 2196 if (po->tp_version <= TPACKET_V2) {
2197 if (macoff + snaplen > po->rx_ring.frame_size) {
2198 if (po->copy_thresh &&
0fd7bac6 2199 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
f6fb8f10 2200 if (skb_shared(skb)) {
2201 copy_skb = skb_clone(skb, GFP_ATOMIC);
2202 } else {
2203 copy_skb = skb_get(skb);
2204 skb_head = skb->data;
2205 }
2206 if (copy_skb)
2207 skb_set_owner_r(copy_skb, sk);
1da177e4 2208 }
f6fb8f10 2209 snaplen = po->rx_ring.frame_size - macoff;
2210 if ((int)snaplen < 0)
2211 snaplen = 0;
1da177e4 2212 }
dc808110
ED
2213 } else if (unlikely(macoff + snaplen >
2214 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2215 u32 nval;
2216
2217 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2218 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2219 snaplen, nval, macoff);
2220 snaplen = nval;
2221 if (unlikely((int)snaplen < 0)) {
2222 snaplen = 0;
2223 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2224 }
1da177e4 2225 }
1da177e4 2226 spin_lock(&sk->sk_receive_queue.lock);
f6fb8f10 2227 h.raw = packet_current_rx_frame(po, skb,
2228 TP_STATUS_KERNEL, (macoff+snaplen));
bbd6ef87 2229 if (!h.raw)
58d19b19 2230 goto drop_n_account;
f6fb8f10 2231 if (po->tp_version <= TPACKET_V2) {
2232 packet_increment_rx_head(po, &po->rx_ring);
2233 /*
2234 * LOSING will be reported till you read the stats,
2235 * because it's COR - Clear On Read.
2236 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2237 * at packet level.
2238 */
ee80fbf3 2239 if (po->stats.stats1.tp_drops)
f6fb8f10 2240 status |= TP_STATUS_LOSING;
2241 }
ee80fbf3 2242 po->stats.stats1.tp_packets++;
1da177e4
LT
2243 if (copy_skb) {
2244 status |= TP_STATUS_COPY;
2245 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2246 }
1da177e4
LT
2247 spin_unlock(&sk->sk_receive_queue.lock);
2248
58d19b19
WB
2249 if (po->has_vnet_hdr) {
2250 if (__packet_rcv_vnet(skb, h.raw + macoff -
2251 sizeof(struct virtio_net_hdr))) {
2252 spin_lock(&sk->sk_receive_queue.lock);
2253 goto drop_n_account;
2254 }
2255 }
2256
bbd6ef87 2257 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
b9c32fb2
DB
2258
2259 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
7a51384c 2260 getnstimeofday(&ts);
1da177e4 2261
b9c32fb2
DB
2262 status |= ts_status;
2263
bbd6ef87
PM
2264 switch (po->tp_version) {
2265 case TPACKET_V1:
2266 h.h1->tp_len = skb->len;
2267 h.h1->tp_snaplen = snaplen;
2268 h.h1->tp_mac = macoff;
2269 h.h1->tp_net = netoff;
4b457bdf
DB
2270 h.h1->tp_sec = ts.tv_sec;
2271 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
bbd6ef87
PM
2272 hdrlen = sizeof(*h.h1);
2273 break;
2274 case TPACKET_V2:
2275 h.h2->tp_len = skb->len;
2276 h.h2->tp_snaplen = snaplen;
2277 h.h2->tp_mac = macoff;
2278 h.h2->tp_net = netoff;
bbd6ef87
PM
2279 h.h2->tp_sec = ts.tv_sec;
2280 h.h2->tp_nsec = ts.tv_nsec;
df8a39de
JP
2281 if (skb_vlan_tag_present(skb)) {
2282 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
a0cdfcf3
AW
2283 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2284 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
a3bcc23e
BG
2285 } else {
2286 h.h2->tp_vlan_tci = 0;
a0cdfcf3 2287 h.h2->tp_vlan_tpid = 0;
a3bcc23e 2288 }
e4d26f4b 2289 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
bbd6ef87
PM
2290 hdrlen = sizeof(*h.h2);
2291 break;
f6fb8f10 2292 case TPACKET_V3:
2293 /* tp_nxt_offset,vlan are already populated above.
2294 * So DONT clear those fields here
2295 */
2296 h.h3->tp_status |= status;
2297 h.h3->tp_len = skb->len;
2298 h.h3->tp_snaplen = snaplen;
2299 h.h3->tp_mac = macoff;
2300 h.h3->tp_net = netoff;
f6fb8f10 2301 h.h3->tp_sec = ts.tv_sec;
2302 h.h3->tp_nsec = ts.tv_nsec;
e4d26f4b 2303 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
f6fb8f10 2304 hdrlen = sizeof(*h.h3);
2305 break;
bbd6ef87
PM
2306 default:
2307 BUG();
2308 }
1da177e4 2309
bbd6ef87 2310 sll = h.raw + TPACKET_ALIGN(hdrlen);
b95cce35 2311 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1da177e4
LT
2312 sll->sll_family = AF_PACKET;
2313 sll->sll_hatype = dev->type;
2314 sll->sll_protocol = skb->protocol;
2315 sll->sll_pkttype = skb->pkt_type;
8032b464 2316 if (unlikely(po->origdev))
80feaacb
PWJ
2317 sll->sll_ifindex = orig_dev->ifindex;
2318 else
2319 sll->sll_ifindex = dev->ifindex;
1da177e4 2320
e16aa207 2321 smp_mb();
f0d4eb29 2322
f6dafa95 2323#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
f0d4eb29 2324 if (po->tp_version <= TPACKET_V2) {
0af55bb5
CG
2325 u8 *start, *end;
2326
f0d4eb29
DB
2327 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2328 macoff + snaplen);
2329
2330 for (start = h.raw; start < end; start += PAGE_SIZE)
2331 flush_dcache_page(pgv_to_page(start));
1da177e4 2332 }
f0d4eb29 2333 smp_wmb();
f6dafa95 2334#endif
f0d4eb29 2335
da413eec 2336 if (po->tp_version <= TPACKET_V2) {
f6fb8f10 2337 __packet_set_status(po, h.raw, status);
da413eec
DC
2338 sk->sk_data_ready(sk);
2339 } else {
f6fb8f10 2340 prb_clear_blk_fill_status(&po->rx_ring);
da413eec 2341 }
1da177e4
LT
2342
2343drop_n_restore:
2344 if (skb_head != skb->data && skb_shared(skb)) {
2345 skb->data = skb_head;
2346 skb->len = skb_len;
2347 }
2348drop:
da37845f
WJ
2349 if (!is_drop_n_account)
2350 consume_skb(skb);
2351 else
2352 kfree_skb(skb);
1da177e4
LT
2353 return 0;
2354
58d19b19 2355drop_n_account:
da37845f 2356 is_drop_n_account = true;
ee80fbf3 2357 po->stats.stats1.tp_drops++;
1da177e4
LT
2358 spin_unlock(&sk->sk_receive_queue.lock);
2359
676d2369 2360 sk->sk_data_ready(sk);
acb5d75b 2361 kfree_skb(copy_skb);
2362 goto drop_n_restore;
2363}
2364
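/*
 * tpacket_destruct_skb() runs when a TX_RING skb is finally freed: it
 * decrements the ring's pending counter and writes TP_STATUS_AVAILABLE
 * (plus any timestamp status bits) back into the frame header so user
 * space can reuse the slot.
 */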
2365static void tpacket_destruct_skb(struct sk_buff *skb)
2366{
2367 struct packet_sock *po = pkt_sk(skb->sk);
1da177e4 2368
69e3c75f 2369 if (likely(po->tx_ring.pg_vec)) {
f0d4eb29 2370 void *ph;
b9c32fb2
DB
2371 __u32 ts;
2372
69e3c75f 2373 ph = skb_shinfo(skb)->destructor_arg;
b0138408 2374 packet_dec_pending(&po->tx_ring);
b9c32fb2
DB
2375
2376 ts = __packet_set_timestamp(po, ph, skb);
2377 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
69e3c75f
JB
2378 }
2379
2380 sock_wfree(skb);
2381}
2382
c72219b7
DB
2383static void tpacket_set_protocol(const struct net_device *dev,
2384 struct sk_buff *skb)
2385{
2386 if (dev->type == ARPHRD_ETHER) {
2387 skb_reset_mac_header(skb);
2388 skb->protocol = eth_hdr(skb)->h_proto;
2389 }
2390}
2391
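/*
 * PACKET_VNET_HDR transmit helpers: __packet_snd_vnet_parse() sanity
 * checks a user supplied virtio_net_hdr (csum offsets inside hdr_len,
 * known GSO type, non-zero gso_size), packet_snd_vnet_parse() pulls the
 * header out of the msghdr, and packet_snd_vnet_gso() applies the
 * checksum/GSO settings to the outgoing skb.
 */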
2392static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2393{
2394 unsigned short gso_type = 0;
2395
2396 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2397 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2398 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2399 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2400 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2401 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2402 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2403
2404 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2405 return -EINVAL;
2406
2407 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2408 switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2409 case VIRTIO_NET_HDR_GSO_TCPV4:
2410 gso_type = SKB_GSO_TCPV4;
2411 break;
2412 case VIRTIO_NET_HDR_GSO_TCPV6:
2413 gso_type = SKB_GSO_TCPV6;
2414 break;
2415 case VIRTIO_NET_HDR_GSO_UDP:
2416 gso_type = SKB_GSO_UDP;
2417 break;
2418 default:
2419 return -EINVAL;
2420 }
2421
2422 if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
2423 gso_type |= SKB_GSO_TCP_ECN;
2424
2425 if (vnet_hdr->gso_size == 0)
2426 return -EINVAL;
2427 }
2428
2429 vnet_hdr->gso_type = gso_type; /* changes type, temporary storage */
2430 return 0;
2431}
2432
2433static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2434 struct virtio_net_hdr *vnet_hdr)
2435{
2436 int n;
2437
2438 if (*len < sizeof(*vnet_hdr))
2439 return -EINVAL;
2440 *len -= sizeof(*vnet_hdr);
2441
2442 n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
2443 if (n != sizeof(*vnet_hdr))
2444 return -EFAULT;
2445
2446 return __packet_snd_vnet_parse(vnet_hdr, *len);
2447}
2448
2449static int packet_snd_vnet_gso(struct sk_buff *skb,
2450 struct virtio_net_hdr *vnet_hdr)
2451{
2452 if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2453 u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start);
2454 u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset);
2455
2456 if (!skb_partial_csum_set(skb, s, o))
2457 return -EINVAL;
2458 }
2459
2460 skb_shinfo(skb)->gso_size =
2461 __virtio16_to_cpu(vio_le(), vnet_hdr->gso_size);
2462 skb_shinfo(skb)->gso_type = vnet_hdr->gso_type;
2463
2464 /* Header must be checked, and gso_segs computed. */
2465 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2466 skb_shinfo(skb)->gso_segs = 0;
2467 return 0;
2468}
2469
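/*
 * tpacket_fill_skb() builds the skb for one TX_RING frame: the
 * link-layer header is copied into the linear area (so it can be
 * validated), while the payload stays in the ring and is attached as
 * page fragments, keeping the transmit path zero-copy.
 */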
2470static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2471		void *frame, struct net_device *dev, void *data, int tp_len,
2472 __be16 proto, unsigned char *addr, int hlen, int copylen,
2473 const struct sockcm_cookie *sockc)
69e3c75f 2474{
184f489e 2475 union tpacket_uhdr ph;
8d39b4a6 2476 int to_write, offset, len, nr_frags, len_max;
69e3c75f
JB
2477 struct socket *sock = po->sk.sk_socket;
2478 struct page *page;
69e3c75f
JB
2479 int err;
2480
2481 ph.raw = frame;
2482
2483 skb->protocol = proto;
2484 skb->dev = dev;
2485 skb->priority = po->sk.sk_priority;
2d37a186 2486 skb->mark = po->sk.sk_mark;
c14ac945 2487 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
69e3c75f
JB
2488 skb_shinfo(skb)->destructor_arg = ph.raw;
2489
ae641949 2490 skb_reserve(skb, hlen);
69e3c75f 2491 skb_reset_network_header(skb);
c1aad275 2492
69e3c75f
JB
2493 to_write = tp_len;
2494
2495 if (sock->type == SOCK_DGRAM) {
2496 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2497 NULL, tp_len);
2498 if (unlikely(err < 0))
2499 return -EINVAL;
1d036d25 2500 } else if (copylen) {
9ed988cd
WB
2501 int hdrlen = min_t(int, copylen, tp_len);
2502
69e3c75f 2503 skb_push(skb, dev->hard_header_len);
1d036d25 2504 skb_put(skb, copylen - dev->hard_header_len);
9ed988cd 2505 err = skb_store_bits(skb, 0, data, hdrlen);
69e3c75f
JB
2506 if (unlikely(err))
2507 return err;
9ed988cd
WB
2508 if (!dev_validate_header(dev, skb->data, hdrlen))
2509 return -EINVAL;
c72219b7
DB
2510 if (!skb->protocol)
2511 tpacket_set_protocol(dev, skb);
69e3c75f 2512
9ed988cd
WB
2513 data += hdrlen;
2514 to_write -= hdrlen;
69e3c75f
JB
2515 }
2516
69e3c75f
JB
2517 offset = offset_in_page(data);
2518 len_max = PAGE_SIZE - offset;
2519 len = ((to_write > len_max) ? len_max : to_write);
2520
2521 skb->data_len = to_write;
2522 skb->len += to_write;
2523 skb->truesize += to_write;
2524 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2525
2526 while (likely(to_write)) {
2527 nr_frags = skb_shinfo(skb)->nr_frags;
2528
2529 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
40d4e3df
ED
2530 pr_err("Packet exceed the number of skb frags(%lu)\n",
2531 MAX_SKB_FRAGS);
69e3c75f
JB
2532 return -EFAULT;
2533 }
2534
0af55bb5
CG
2535 page = pgv_to_page(data);
2536 data += len;
69e3c75f
JB
2537 flush_dcache_page(page);
2538 get_page(page);
0af55bb5 2539 skb_fill_page_desc(skb, nr_frags, page, offset, len);
69e3c75f
JB
2540 to_write -= len;
2541 offset = 0;
2542 len_max = PAGE_SIZE;
2543 len = ((to_write > len_max) ? len_max : to_write);
2544 }
2545
8fd6c80d 2546 skb_probe_transport_header(skb, 0);
efdfa2f7 2547
2548 return tp_len;
2549}
2550
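/*
 * tpacket_parse_header() validates one TX_RING frame written by user
 * space: it reads tp_len from the frame header, rejects frames larger
 * than size_max, honours PACKET_TX_HAS_OFF offsets when enabled, and
 * returns the start of the packet data via *data.
 */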
2551static int tpacket_parse_header(struct packet_sock *po, void *frame,
2552 int size_max, void **data)
2553{
2554 union tpacket_uhdr ph;
2555 int tp_len, off;
2556
2557 ph.raw = frame;
2558
2559 switch (po->tp_version) {
2560 case TPACKET_V2:
2561 tp_len = ph.h2->tp_len;
2562 break;
2563 default:
2564 tp_len = ph.h1->tp_len;
2565 break;
2566 }
2567 if (unlikely(tp_len > size_max)) {
2568 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2569 return -EMSGSIZE;
2570 }
2571
2572 if (unlikely(po->tp_tx_has_off)) {
2573 int off_min, off_max;
2574
2575 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2576 off_max = po->tx_ring.frame_size - tp_len;
2577 if (po->sk.sk_type == SOCK_DGRAM) {
2578 switch (po->tp_version) {
2579 case TPACKET_V2:
2580 off = ph.h2->tp_net;
2581 break;
2582 default:
2583 off = ph.h1->tp_net;
2584 break;
2585 }
2586 } else {
2587 switch (po->tp_version) {
2588 case TPACKET_V2:
2589 off = ph.h2->tp_mac;
2590 break;
2591 default:
2592 off = ph.h1->tp_mac;
2593 break;
2594 }
2595 }
2596 if (unlikely((off < off_min) || (off_max < off)))
2597 return -EINVAL;
2598 } else {
2599 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2600 }
2601
2602 *data = frame + off;
2603 return tp_len;
2604}
2605
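/*
 * tpacket_snd() drives the PACKET_TX_RING: it walks the ring looking
 * for frames marked TP_STATUS_SEND_REQUEST, turns each one into an skb
 * with tpacket_fill_skb(), transmits it via po->xmit(), and lets
 * tpacket_destruct_skb() hand the slot back to user space.
 */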
2606static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2607{
69e3c75f
JB
2608 struct sk_buff *skb;
2609 struct net_device *dev;
1d036d25 2610 struct virtio_net_hdr *vnet_hdr = NULL;
c14ac945 2611 struct sockcm_cookie sockc;
69e3c75f 2612 __be16 proto;
09effa67 2613 int err, reserve = 0;
40d4e3df 2614 void *ph;
342dfc30 2615 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
87a2fd28 2616 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
69e3c75f
JB
2617 int tp_len, size_max;
2618 unsigned char *addr;
8d39b4a6 2619 void *data;
69e3c75f 2620 int len_sum = 0;
9e67030a 2621 int status = TP_STATUS_AVAILABLE;
1d036d25 2622 int hlen, tlen, copylen = 0;
69e3c75f 2623
69e3c75f
JB
2624 mutex_lock(&po->pg_vec_lock);
2625
66e56cd4 2626 if (likely(saddr == NULL)) {
e40526cb 2627 dev = packet_cached_dev_get(po);
69e3c75f
JB
2628 proto = po->num;
2629 addr = NULL;
2630 } else {
2631 err = -EINVAL;
2632 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2633 goto out;
2634 if (msg->msg_namelen < (saddr->sll_halen
2635 + offsetof(struct sockaddr_ll,
2636 sll_addr)))
2637 goto out;
69e3c75f
JB
2638 proto = saddr->sll_protocol;
2639 addr = saddr->sll_addr;
827d9780 2640 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
69e3c75f
JB
2641 }
2642
edbe7746 2643 sockc.tsflags = po->sk.sk_tsflags;
c14ac945
SHY
2644 if (msg->msg_controllen) {
2645 err = sock_cmsg_send(&po->sk, msg, &sockc);
2646 if (unlikely(err))
2647 goto out;
2648 }
2649
69e3c75f
JB
2650 err = -ENXIO;
2651 if (unlikely(dev == NULL))
2652 goto out;
69e3c75f
JB
2653 err = -ENETDOWN;
2654 if (unlikely(!(dev->flags & IFF_UP)))
2655 goto out_put;
2656
5cfb4c8d
DB
2657 if (po->sk.sk_socket->type == SOCK_RAW)
2658 reserve = dev->hard_header_len;
69e3c75f 2659 size_max = po->tx_ring.frame_size
b5dd884e 2660 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
69e3c75f 2661
1d036d25 2662 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
5cfb4c8d 2663 size_max = dev->mtu + reserve + VLAN_HLEN;
09effa67 2664
69e3c75f
JB
2665 do {
2666 ph = packet_current_frame(po, &po->tx_ring,
87a2fd28 2667 TP_STATUS_SEND_REQUEST);
69e3c75f 2668 if (unlikely(ph == NULL)) {
87a2fd28
DB
2669 if (need_wait && need_resched())
2670 schedule();
69e3c75f
JB
2671 continue;
2672 }
2673
8d39b4a6
WB
2674 skb = NULL;
2675 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2676 if (tp_len < 0)
2677 goto tpacket_error;
2678
69e3c75f 2679 status = TP_STATUS_SEND_REQUEST;
ae641949
HX
2680 hlen = LL_RESERVED_SPACE(dev);
2681 tlen = dev->needed_tailroom;
1d036d25
WB
2682 if (po->has_vnet_hdr) {
2683 vnet_hdr = data;
2684 data += sizeof(*vnet_hdr);
2685 tp_len -= sizeof(*vnet_hdr);
2686 if (tp_len < 0 ||
2687 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2688 tp_len = -EINVAL;
2689 goto tpacket_error;
2690 }
2691 copylen = __virtio16_to_cpu(vio_le(),
2692 vnet_hdr->hdr_len);
2693 }
9ed988cd 2694 copylen = max_t(int, copylen, dev->hard_header_len);
69e3c75f 2695 skb = sock_alloc_send_skb(&po->sk,
1d036d25
WB
2696 hlen + tlen + sizeof(struct sockaddr_ll) +
2697 (copylen - dev->hard_header_len),
fbf33a28 2698 !need_wait, &err);
69e3c75f 2699
fbf33a28
KM
2700 if (unlikely(skb == NULL)) {
2701 /* we assume the socket was initially writeable ... */
2702 if (likely(len_sum > 0))
2703 err = len_sum;
69e3c75f 2704 goto out_status;
fbf33a28 2705 }
8d39b4a6 2706 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
c14ac945 2707 addr, hlen, copylen, &sockc);
dbd46ab4 2708 if (likely(tp_len >= 0) &&
5cfb4c8d 2709 tp_len > dev->mtu + reserve &&
1d036d25 2710 !po->has_vnet_hdr &&
3c70c132
DB
2711 !packet_extra_vlan_len_allowed(dev, skb))
2712 tp_len = -EMSGSIZE;
69e3c75f
JB
2713
2714 if (unlikely(tp_len < 0)) {
8d39b4a6 2715tpacket_error:
69e3c75f
JB
2716 if (po->tp_loss) {
2717 __packet_set_status(po, ph,
2718 TP_STATUS_AVAILABLE);
2719 packet_increment_head(&po->tx_ring);
2720 kfree_skb(skb);
2721 continue;
2722 } else {
2723 status = TP_STATUS_WRONG_FORMAT;
2724 err = tp_len;
2725 goto out_status;
2726 }
2727 }
2728
1d036d25
WB
2729 if (po->has_vnet_hdr && packet_snd_vnet_gso(skb, vnet_hdr)) {
2730 tp_len = -EINVAL;
2731 goto tpacket_error;
2732 }
2733
0fd5d57b
DB
2734 packet_pick_tx_queue(dev, skb);
2735
69e3c75f
JB
2736 skb->destructor = tpacket_destruct_skb;
2737 __packet_set_status(po, ph, TP_STATUS_SENDING);
b0138408 2738 packet_inc_pending(&po->tx_ring);
69e3c75f
JB
2739
2740 status = TP_STATUS_SEND_REQUEST;
d346a3fa 2741 err = po->xmit(skb);
eb70df13
JP
2742 if (unlikely(err > 0)) {
2743 err = net_xmit_errno(err);
2744 if (err && __packet_get_status(po, ph) ==
2745 TP_STATUS_AVAILABLE) {
2746 /* skb was destructed already */
2747 skb = NULL;
2748 goto out_status;
2749 }
2750 /*
2751 * skb was dropped but not destructed yet;
2752 * let's treat it like congestion or err < 0
2753 */
2754 err = 0;
2755 }
69e3c75f
JB
2756 packet_increment_head(&po->tx_ring);
2757 len_sum += tp_len;
b0138408
DB
2758 } while (likely((ph != NULL) ||
2759 /* Note: packet_read_pending() might be slow if we have
2760 * to call it as it's per_cpu variable, but in fast-path
2761 * we already short-circuit the loop with the first
2762 * condition, and luckily don't have to go that path
2763 * anyway.
2764 */
2765 (need_wait && packet_read_pending(&po->tx_ring))));
69e3c75f
JB
2766
2767 err = len_sum;
2768 goto out_put;
2769
69e3c75f
JB
2770out_status:
2771 __packet_set_status(po, ph, status);
2772 kfree_skb(skb);
2773out_put:
e40526cb 2774 dev_put(dev);
69e3c75f
JB
2775out:
2776 mutex_unlock(&po->pg_vec_lock);
2777 return err;
2778}
69e3c75f 2779
eea49cc9
OJ
2780static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2781 size_t reserve, size_t len,
2782 size_t linear, int noblock,
2783 int *err)
bfd5f4a3
SS
2784{
2785 struct sk_buff *skb;
2786
2787 /* Under a page? Don't bother with paged skb. */
2788 if (prepad + len < PAGE_SIZE || !linear)
2789 linear = len;
2790
2791 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
28d64271 2792 err, 0);
bfd5f4a3
SS
2793 if (!skb)
2794 return NULL;
2795
2796 skb_reserve(skb, reserve);
2797 skb_put(skb, linear);
2798 skb->data_len = len - linear;
2799 skb->len += len - linear;
2800
2801 return skb;
2802}
2803
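/*
 * packet_snd() is the ordinary (non-ring) sendmsg path: it resolves the
 * egress device, optionally parses a leading virtio_net_hdr, copies the
 * payload from the iterator into a freshly allocated skb, validates the
 * link-layer header for SOCK_RAW, and hands the result to po->xmit().
 */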
2804static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2805{
2806 struct sock *sk = sock->sk;
342dfc30 2807 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
1da177e4
LT
2808 struct sk_buff *skb;
2809 struct net_device *dev;
0e11c91e 2810 __be16 proto;
1da177e4 2811 unsigned char *addr;
827d9780 2812 int err, reserve = 0;
c7d39e32 2813 struct sockcm_cookie sockc;
bfd5f4a3
SS
2814 struct virtio_net_hdr vnet_hdr = { 0 };
2815 int offset = 0;
bfd5f4a3 2816 struct packet_sock *po = pkt_sk(sk);
ae641949 2817 int hlen, tlen;
3bdc0eba 2818 int extra_len = 0;
1da177e4
LT
2819
2820 /*
1ce4f28b 2821 * Get and verify the address.
1da177e4 2822 */
1ce4f28b 2823
66e56cd4 2824 if (likely(saddr == NULL)) {
e40526cb 2825 dev = packet_cached_dev_get(po);
1da177e4
LT
2826 proto = po->num;
2827 addr = NULL;
2828 } else {
2829 err = -EINVAL;
2830 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2831 goto out;
0fb375fb
EB
2832 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2833 goto out;
1da177e4
LT
2834 proto = saddr->sll_protocol;
2835 addr = saddr->sll_addr;
827d9780 2836 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
1da177e4
LT
2837 }
2838
1da177e4 2839 err = -ENXIO;
e40526cb 2840 if (unlikely(dev == NULL))
1da177e4 2841 goto out_unlock;
d5e76b0a 2842 err = -ENETDOWN;
e40526cb 2843 if (unlikely(!(dev->flags & IFF_UP)))
d5e76b0a
DM
2844 goto out_unlock;
2845
edbe7746 2846 sockc.tsflags = sk->sk_tsflags;
c7d39e32
EJ
2847 sockc.mark = sk->sk_mark;
2848 if (msg->msg_controllen) {
2849 err = sock_cmsg_send(sk, msg, &sockc);
2850 if (unlikely(err))
2851 goto out_unlock;
2852 }
2853
e40526cb
DB
2854 if (sock->type == SOCK_RAW)
2855 reserve = dev->hard_header_len;
bfd5f4a3 2856 if (po->has_vnet_hdr) {
16cc1400
WB
2857 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2858 if (err)
bfd5f4a3 2859 goto out_unlock;
bfd5f4a3
SS
2860 }
2861
3bdc0eba
BG
2862 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2863 if (!netif_supports_nofcs(dev)) {
2864 err = -EPROTONOSUPPORT;
2865 goto out_unlock;
2866 }
2867 extra_len = 4; /* We're doing our own CRC */
2868 }
2869
1da177e4 2870 err = -EMSGSIZE;
16cc1400
WB
2871 if (!vnet_hdr.gso_type &&
2872 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
1da177e4
LT
2873 goto out_unlock;
2874
bfd5f4a3 2875 err = -ENOBUFS;
ae641949
HX
2876 hlen = LL_RESERVED_SPACE(dev);
2877 tlen = dev->needed_tailroom;
dc9e5153 2878 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
d3869efe 2879 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
bfd5f4a3 2880 msg->msg_flags & MSG_DONTWAIT, &err);
40d4e3df 2881 if (skb == NULL)
1da177e4
LT
2882 goto out_unlock;
2883
bfd5f4a3 2884 skb_set_network_header(skb, reserve);
1da177e4 2885
0c4e8581 2886 err = -EINVAL;
9c707762
WB
2887 if (sock->type == SOCK_DGRAM) {
2888 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
46d2cfb1 2889 if (unlikely(offset < 0))
9c707762 2890 goto out_free;
9c707762 2891 }
1da177e4
LT
2892
2893 /* Returns -EFAULT on error */
c0371da6 2894 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
1da177e4
LT
2895 if (err)
2896 goto out_free;
bf84a010 2897
9ed988cd
WB
2898 if (sock->type == SOCK_RAW &&
2899 !dev_validate_header(dev, skb->data, len)) {
2900 err = -EINVAL;
2901 goto out_free;
2902 }
2903
c14ac945 2904 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
1da177e4 2905
16cc1400 2906 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3c70c132
DB
2907 !packet_extra_vlan_len_allowed(dev, skb)) {
2908 err = -EMSGSIZE;
2909 goto out_free;
57f89bfa
BG
2910 }
2911
09effa67
DM
2912 skb->protocol = proto;
2913 skb->dev = dev;
1da177e4 2914 skb->priority = sk->sk_priority;
c7d39e32 2915 skb->mark = sockc.mark;
0fd5d57b
DB
2916
2917 packet_pick_tx_queue(dev, skb);
1da177e4 2918
bfd5f4a3 2919 if (po->has_vnet_hdr) {
16cc1400
WB
2920 err = packet_snd_vnet_gso(skb, &vnet_hdr);
2921 if (err)
2922 goto out_free;
2923 len += sizeof(vnet_hdr);
bfd5f4a3
SS
2924 }
2925
8fd6c80d
DB
2926 skb_probe_transport_header(skb, reserve);
2927
3bdc0eba
BG
2928 if (unlikely(extra_len == 4))
2929 skb->no_fcs = 1;
2930
d346a3fa 2931 err = po->xmit(skb);
1da177e4
LT
2932 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2933 goto out_unlock;
2934
e40526cb 2935 dev_put(dev);
1da177e4 2936
40d4e3df 2937 return len;
1da177e4
LT
2938
2939out_free:
2940 kfree_skb(skb);
2941out_unlock:
e40526cb 2942 if (dev)
1da177e4
LT
2943 dev_put(dev);
2944out:
2945 return err;
2946}
2947
1b784140 2948static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
69e3c75f 2949{
69e3c75f
JB
2950 struct sock *sk = sock->sk;
2951 struct packet_sock *po = pkt_sk(sk);
d346a3fa 2952
69e3c75f
JB
2953 if (po->tx_ring.pg_vec)
2954 return tpacket_snd(po, msg);
2955 else
69e3c75f
JB
2956 return packet_snd(sock, msg, len);
2957}
2958
1da177e4
LT
2959/*
2960 * Close a PACKET socket. This is fairly simple. We immediately go
2961 * to 'closed' state and remove our protocol entry in the device list.
2962 */
2963
2964static int packet_release(struct socket *sock)
2965{
2966 struct sock *sk = sock->sk;
2967 struct packet_sock *po;
d12d01d6 2968 struct net *net;
f6fb8f10 2969 union tpacket_req_u req_u;
1da177e4
LT
2970
2971 if (!sk)
2972 return 0;
2973
3b1e0a65 2974 net = sock_net(sk);
1da177e4
LT
2975 po = pkt_sk(sk);
2976
0fa7fa98 2977 mutex_lock(&net->packet.sklist_lock);
808f5114 2978 sk_del_node_init_rcu(sk);
0fa7fa98
PE
2979 mutex_unlock(&net->packet.sklist_lock);
2980
2981 preempt_disable();
920de804 2982 sock_prot_inuse_add(net, sk->sk_prot, -1);
0fa7fa98 2983 preempt_enable();
1da177e4 2984
808f5114 2985 spin_lock(&po->bind_lock);
ce06b03e 2986 unregister_prot_hook(sk, false);
66e56cd4
DB
2987 packet_cached_dev_reset(po);
2988
160ff18a
BG
2989 if (po->prot_hook.dev) {
2990 dev_put(po->prot_hook.dev);
2991 po->prot_hook.dev = NULL;
2992 }
808f5114 2993 spin_unlock(&po->bind_lock);
1da177e4 2994
1da177e4 2995 packet_flush_mclist(sk);
1da177e4 2996
9665d5d6
PS
2997 if (po->rx_ring.pg_vec) {
2998 memset(&req_u, 0, sizeof(req_u));
f6fb8f10 2999 packet_set_ring(sk, &req_u, 1, 0);
9665d5d6 3000 }
69e3c75f 3001
9665d5d6
PS
3002 if (po->tx_ring.pg_vec) {
3003 memset(&req_u, 0, sizeof(req_u));
f6fb8f10 3004 packet_set_ring(sk, &req_u, 1, 1);
9665d5d6 3005 }
1da177e4 3006
dc99f600
DM
3007 fanout_release(sk);
3008
808f5114 3009 synchronize_net();
1da177e4
LT
3010 /*
3011 * Now the socket is dead. No more input will appear.
3012 */
1da177e4
LT
3013 sock_orphan(sk);
3014 sock->sk = NULL;
3015
3016 /* Purge queues */
3017
3018 skb_queue_purge(&sk->sk_receive_queue);
b0138408 3019 packet_free_pending(po);
17ab56a2 3020 sk_refcnt_debug_release(sk);
1da177e4
LT
3021
3022 sock_put(sk);
3023 return 0;
3024}
3025
3026/*
3027 * Attach a packet hook.
3028 */
3029
30f7ea1c
FR
3030static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3031 __be16 proto)
1da177e4
LT
3032{
3033 struct packet_sock *po = pkt_sk(sk);
158cd4af 3034 struct net_device *dev_curr;
902fefb8
DB
3035 __be16 proto_curr;
3036 bool need_rehook;
30f7ea1c
FR
3037 struct net_device *dev = NULL;
3038 int ret = 0;
3039 bool unlisted = false;
dc99f600 3040
30f7ea1c 3041 if (po->fanout)
dc99f600 3042 return -EINVAL;
1da177e4
LT
3043
3044 lock_sock(sk);
1da177e4 3045 spin_lock(&po->bind_lock);
30f7ea1c
FR
3046 rcu_read_lock();
3047
3048 if (name) {
3049 dev = dev_get_by_name_rcu(sock_net(sk), name);
3050 if (!dev) {
3051 ret = -ENODEV;
3052 goto out_unlock;
3053 }
3054 } else if (ifindex) {
3055 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3056 if (!dev) {
3057 ret = -ENODEV;
3058 goto out_unlock;
3059 }
3060 }
3061
3062 if (dev)
3063 dev_hold(dev);
66e56cd4 3064
902fefb8
DB
3065 proto_curr = po->prot_hook.type;
3066 dev_curr = po->prot_hook.dev;
3067
3068 need_rehook = proto_curr != proto || dev_curr != dev;
3069
3070 if (need_rehook) {
30f7ea1c
FR
3071 if (po->running) {
3072 rcu_read_unlock();
3073 __unregister_prot_hook(sk, true);
3074 rcu_read_lock();
3075 dev_curr = po->prot_hook.dev;
3076 if (dev)
3077 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3078 dev->ifindex);
3079 }
1da177e4 3080
902fefb8
DB
3081 po->num = proto;
3082 po->prot_hook.type = proto;
902fefb8 3083
30f7ea1c
FR
3084 if (unlikely(unlisted)) {
3085 dev_put(dev);
3086 po->prot_hook.dev = NULL;
3087 po->ifindex = -1;
3088 packet_cached_dev_reset(po);
3089 } else {
3090 po->prot_hook.dev = dev;
3091 po->ifindex = dev ? dev->ifindex : 0;
3092 packet_cached_dev_assign(po, dev);
3093 }
902fefb8 3094 }
158cd4af
LW
3095 if (dev_curr)
3096 dev_put(dev_curr);
66e56cd4 3097
902fefb8 3098 if (proto == 0 || !need_rehook)
1da177e4
LT
3099 goto out_unlock;
3100
30f7ea1c 3101 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
ce06b03e 3102 register_prot_hook(sk);
be85d4ad
UT
3103 } else {
3104 sk->sk_err = ENETDOWN;
3105 if (!sock_flag(sk, SOCK_DEAD))
3106 sk->sk_error_report(sk);
1da177e4
LT
3107 }
3108
3109out_unlock:
30f7ea1c 3110 rcu_read_unlock();
1da177e4
LT
3111 spin_unlock(&po->bind_lock);
3112 release_sock(sk);
30f7ea1c 3113 return ret;
1da177e4
LT
3114}
3115
3116/*
3117 * Bind a packet socket to a device
3118 */
3119
40d4e3df
ED
3120static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3121 int addr_len)
1da177e4 3122{
40d4e3df 3123 struct sock *sk = sock->sk;
1da177e4 3124 char name[15];
1ce4f28b 3125
1da177e4
LT
3126 /*
3127 * Check legality
3128 */
1ce4f28b 3129
8ae55f04 3130 if (addr_len != sizeof(struct sockaddr))
1da177e4 3131 return -EINVAL;
40d4e3df 3132 strlcpy(name, uaddr->sa_data, sizeof(name));
1da177e4 3133
30f7ea1c 3134 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
1da177e4 3135}
1da177e4
LT
3136
3137static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3138{
40d4e3df
ED
3139 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3140 struct sock *sk = sock->sk;
1da177e4
LT
3141
3142 /*
3143 * Check legality
3144 */
1ce4f28b 3145
1da177e4
LT
3146 if (addr_len < sizeof(struct sockaddr_ll))
3147 return -EINVAL;
3148 if (sll->sll_family != AF_PACKET)
3149 return -EINVAL;
3150
30f7ea1c
FR
3151 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3152 sll->sll_protocol ? : pkt_sk(sk)->num);
1da177e4
LT
3153}
3154
3155static struct proto packet_proto = {
3156 .name = "PACKET",
3157 .owner = THIS_MODULE,
3158 .obj_size = sizeof(struct packet_sock),
3159};
3160
3161/*
1ce4f28b 3162 * Create a packet of type SOCK_PACKET.
1da177e4
LT
3163 */
3164
3f378b68
EP
3165static int packet_create(struct net *net, struct socket *sock, int protocol,
3166 int kern)
1da177e4
LT
3167{
3168 struct sock *sk;
3169 struct packet_sock *po;
0e11c91e 3170 __be16 proto = (__force __be16)protocol; /* weird, but documented */
1da177e4
LT
3171 int err;
3172
df008c91 3173 if (!ns_capable(net->user_ns, CAP_NET_RAW))
1da177e4 3174 return -EPERM;
be02097c
DM
3175 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3176 sock->type != SOCK_PACKET)
1da177e4
LT
3177 return -ESOCKTNOSUPPORT;
3178
3179 sock->state = SS_UNCONNECTED;
3180
3181 err = -ENOBUFS;
11aa9c28 3182 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
1da177e4
LT
3183 if (sk == NULL)
3184 goto out;
3185
3186 sock->ops = &packet_ops;
1da177e4
LT
3187 if (sock->type == SOCK_PACKET)
3188 sock->ops = &packet_ops_spkt;
be02097c 3189
1da177e4
LT
3190 sock_init_data(sock, sk);
3191
3192 po = pkt_sk(sk);
3193 sk->sk_family = PF_PACKET;
0e11c91e 3194 po->num = proto;
d346a3fa 3195 po->xmit = dev_queue_xmit;
66e56cd4 3196
b0138408
DB
3197 err = packet_alloc_pending(po);
3198 if (err)
3199 goto out2;
3200
66e56cd4 3201 packet_cached_dev_reset(po);
1da177e4
LT
3202
3203 sk->sk_destruct = packet_sock_destruct;
17ab56a2 3204 sk_refcnt_debug_inc(sk);
1da177e4
LT
3205
3206 /*
3207 * Attach a protocol block
3208 */
3209
3210 spin_lock_init(&po->bind_lock);
905db440 3211 mutex_init(&po->pg_vec_lock);
0648ab70 3212 po->rollover = NULL;
1da177e4 3213 po->prot_hook.func = packet_rcv;
be02097c 3214
1da177e4
LT
3215 if (sock->type == SOCK_PACKET)
3216 po->prot_hook.func = packet_rcv_spkt;
be02097c 3217
1da177e4
LT
3218 po->prot_hook.af_packet_priv = sk;
3219
0e11c91e
AV
3220 if (proto) {
3221 po->prot_hook.type = proto;
ce06b03e 3222 register_prot_hook(sk);
1da177e4
LT
3223 }
3224
0fa7fa98 3225 mutex_lock(&net->packet.sklist_lock);
808f5114 3226 sk_add_node_rcu(sk, &net->packet.sklist);
0fa7fa98
PE
3227 mutex_unlock(&net->packet.sklist_lock);
3228
3229 preempt_disable();
3680453c 3230 sock_prot_inuse_add(net, &packet_proto, 1);
0fa7fa98 3231 preempt_enable();
808f5114 3232
40d4e3df 3233 return 0;
b0138408
DB
3234out2:
3235 sk_free(sk);
1da177e4
LT
3236out:
3237 return err;
3238}
3239
3240/*
3241 * Pull a packet from our receive queue and hand it to the user.
3242 * If necessary we block.
3243 */
3244
1b784140
YX
3245static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3246 int flags)
1da177e4
LT
3247{
3248 struct sock *sk = sock->sk;
3249 struct sk_buff *skb;
3250 int copied, err;
bfd5f4a3 3251 int vnet_hdr_len = 0;
2472d761 3252 unsigned int origlen = 0;
1da177e4
LT
3253
3254 err = -EINVAL;
ed85b565 3255 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
1da177e4
LT
3256 goto out;
3257
3258#if 0
3259 /* What error should we return now? EUNATTACH? */
3260 if (pkt_sk(sk)->ifindex < 0)
3261 return -ENODEV;
3262#endif
3263
ed85b565 3264 if (flags & MSG_ERRQUEUE) {
cb820f8e
RC
3265 err = sock_recv_errqueue(sk, msg, len,
3266 SOL_PACKET, PACKET_TX_TIMESTAMP);
ed85b565
RC
3267 goto out;
3268 }
3269
1da177e4
LT
3270 /*
3271 * Call the generic datagram receiver. This handles all sorts
3272 * of horrible races and re-entrancy so we can forget about it
3273 * in the protocol layers.
3274 *
3275 * Now it will return ENETDOWN, if device have just gone down,
3276 * but then it will block.
3277 */
3278
40d4e3df 3279 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
1da177e4
LT
3280
3281 /*
1ce4f28b 3282 * An error occurred so return it. Because skb_recv_datagram()
1da177e4
LT
3283 * handles the blocking we don't see and worry about blocking
3284 * retries.
3285 */
3286
8ae55f04 3287 if (skb == NULL)
1da177e4
LT
3288 goto out;
3289
2ccdbaa6
WB
3290 if (pkt_sk(sk)->pressure)
3291 packet_rcv_has_room(pkt_sk(sk), NULL);
3292
bfd5f4a3 3293 if (pkt_sk(sk)->has_vnet_hdr) {
16cc1400
WB
3294 err = packet_rcv_vnet(msg, skb, &len);
3295 if (err)
bfd5f4a3 3296 goto out_free;
16cc1400 3297 vnet_hdr_len = sizeof(struct virtio_net_hdr);
bfd5f4a3
SS
3298 }
3299
3300	/* You lose any data beyond the buffer you gave. If this worries
3301	 * a user program, it can ask the device for its MTU
3302	 * anyway.
3303	 */
1da177e4 3304 copied = skb->len;
40d4e3df
ED
3305 if (copied > len) {
3306 copied = len;
3307 msg->msg_flags |= MSG_TRUNC;
1da177e4
LT
3308 }
3309
51f3d02b 3310 err = skb_copy_datagram_msg(skb, 0, msg, copied);
1da177e4
LT
3311 if (err)
3312 goto out_free;
3313
2472d761
EB
3314 if (sock->type != SOCK_PACKET) {
3315 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3316
3317 /* Original length was stored in sockaddr_ll fields */
3318 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3319 sll->sll_family = AF_PACKET;
3320 sll->sll_protocol = skb->protocol;
3321 }
3322
3b885787 3323 sock_recv_ts_and_drops(msg, sk, skb);
1da177e4 3324
f3d33426
HFS
3325 if (msg->msg_name) {
3326 /* If the address length field is there to be filled
3327 * in, we fill it in now.
3328 */
3329 if (sock->type == SOCK_PACKET) {
342dfc30 3330 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
f3d33426
HFS
3331 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3332 } else {
3333 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2472d761 3334
f3d33426
HFS
3335 msg->msg_namelen = sll->sll_halen +
3336 offsetof(struct sockaddr_ll, sll_addr);
3337 }
ffbc6111
HX
3338 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3339 msg->msg_namelen);
f3d33426 3340 }
1da177e4 3341
8dc41944 3342 if (pkt_sk(sk)->auxdata) {
ffbc6111
HX
3343 struct tpacket_auxdata aux;
3344
3345 aux.tp_status = TP_STATUS_USER;
3346 if (skb->ip_summed == CHECKSUM_PARTIAL)
3347 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
682f048b
AD
3348 else if (skb->pkt_type != PACKET_OUTGOING &&
3349 (skb->ip_summed == CHECKSUM_COMPLETE ||
3350 skb_csum_unnecessary(skb)))
3351 aux.tp_status |= TP_STATUS_CSUM_VALID;
3352
2472d761 3353 aux.tp_len = origlen;
ffbc6111
HX
3354 aux.tp_snaplen = skb->len;
3355 aux.tp_mac = 0;
bbe735e4 3356 aux.tp_net = skb_network_offset(skb);
df8a39de
JP
3357 if (skb_vlan_tag_present(skb)) {
3358 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
a0cdfcf3
AW
3359 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3360 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
a3bcc23e
BG
3361 } else {
3362 aux.tp_vlan_tci = 0;
a0cdfcf3 3363 aux.tp_vlan_tpid = 0;
a3bcc23e 3364 }
ffbc6111 3365 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
8dc41944
HX
3366 }
3367
1da177e4
LT
3368 /*
3369 * Free or return the buffer as appropriate. Again this
3370 * hides all the races and re-entrancy issues from us.
3371 */
bfd5f4a3 3372 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1da177e4
LT
3373
3374out_free:
3375 skb_free_datagram(sk, skb);
3376out:
3377 return err;
3378}
3379
1da177e4
LT
3380static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3381 int *uaddr_len, int peer)
3382{
3383 struct net_device *dev;
3384 struct sock *sk = sock->sk;
3385
3386 if (peer)
3387 return -EOPNOTSUPP;
3388
3389 uaddr->sa_family = AF_PACKET;
2dc85bf3 3390 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
654d1f8a
ED
3391 rcu_read_lock();
3392 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3393 if (dev)
2dc85bf3 3394 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
654d1f8a 3395 rcu_read_unlock();
1da177e4
LT
3396 *uaddr_len = sizeof(*uaddr);
3397
3398 return 0;
3399}
1da177e4
LT
3400
3401static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3402 int *uaddr_len, int peer)
3403{
3404 struct net_device *dev;
3405 struct sock *sk = sock->sk;
3406 struct packet_sock *po = pkt_sk(sk);
13cfa97b 3407 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1da177e4
LT
3408
3409 if (peer)
3410 return -EOPNOTSUPP;
3411
3412 sll->sll_family = AF_PACKET;
3413 sll->sll_ifindex = po->ifindex;
3414 sll->sll_protocol = po->num;
67286640 3415 sll->sll_pkttype = 0;
654d1f8a
ED
3416 rcu_read_lock();
3417 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1da177e4
LT
3418 if (dev) {
3419 sll->sll_hatype = dev->type;
3420 sll->sll_halen = dev->addr_len;
3421 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
3422 } else {
3423 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3424 sll->sll_halen = 0;
3425 }
654d1f8a 3426 rcu_read_unlock();
0fb375fb 3427 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1da177e4
LT
3428
3429 return 0;
3430}
3431
2aeb0b88
WC
3432static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3433 int what)
1da177e4
LT
3434{
3435 switch (i->type) {
3436 case PACKET_MR_MULTICAST:
1162563f
JP
3437 if (i->alen != dev->addr_len)
3438 return -EINVAL;
1da177e4 3439 if (what > 0)
22bedad3 3440 return dev_mc_add(dev, i->addr);
1da177e4 3441 else
22bedad3 3442 return dev_mc_del(dev, i->addr);
1da177e4
LT
3443 break;
3444 case PACKET_MR_PROMISC:
2aeb0b88 3445 return dev_set_promiscuity(dev, what);
1da177e4 3446 case PACKET_MR_ALLMULTI:
2aeb0b88 3447 return dev_set_allmulti(dev, what);
d95ed927 3448 case PACKET_MR_UNICAST:
1162563f
JP
3449 if (i->alen != dev->addr_len)
3450 return -EINVAL;
d95ed927 3451 if (what > 0)
a748ee24 3452 return dev_uc_add(dev, i->addr);
d95ed927 3453 else
a748ee24 3454 return dev_uc_del(dev, i->addr);
d95ed927 3455 break;
40d4e3df
ED
3456 default:
3457 break;
1da177e4 3458 }
2aeb0b88 3459 return 0;
1da177e4
LT
3460}
3461
82f17091
FR
3462static void packet_dev_mclist_delete(struct net_device *dev,
3463 struct packet_mclist **mlp)
1da177e4 3464{
82f17091
FR
3465 struct packet_mclist *ml;
3466
3467 while ((ml = *mlp) != NULL) {
3468 if (ml->ifindex == dev->ifindex) {
3469 packet_dev_mc(dev, ml, -1);
3470 *mlp = ml->next;
3471 kfree(ml);
3472 } else
3473 mlp = &ml->next;
1da177e4
LT
3474 }
3475}
3476
0fb375fb 3477static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
3478{
3479 struct packet_sock *po = pkt_sk(sk);
3480 struct packet_mclist *ml, *i;
3481 struct net_device *dev;
3482 int err;
3483
3484 rtnl_lock();
3485
3486 err = -ENODEV;
3b1e0a65 3487 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1da177e4
LT
3488 if (!dev)
3489 goto done;
3490
3491 err = -EINVAL;
1162563f 3492 if (mreq->mr_alen > dev->addr_len)
1da177e4
LT
3493 goto done;
3494
3495 err = -ENOBUFS;
8b3a7005 3496 i = kmalloc(sizeof(*i), GFP_KERNEL);
1da177e4
LT
3497 if (i == NULL)
3498 goto done;
3499
3500 err = 0;
3501 for (ml = po->mclist; ml; ml = ml->next) {
3502 if (ml->ifindex == mreq->mr_ifindex &&
3503 ml->type == mreq->mr_type &&
3504 ml->alen == mreq->mr_alen &&
3505 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3506 ml->count++;
3507 /* Free the new element ... */
3508 kfree(i);
3509 goto done;
3510 }
3511 }
3512
3513 i->type = mreq->mr_type;
3514 i->ifindex = mreq->mr_ifindex;
3515 i->alen = mreq->mr_alen;
3516 memcpy(i->addr, mreq->mr_address, i->alen);
309cf37f 3517 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
1da177e4
LT
3518 i->count = 1;
3519 i->next = po->mclist;
3520 po->mclist = i;
2aeb0b88
WC
3521 err = packet_dev_mc(dev, i, 1);
3522 if (err) {
3523 po->mclist = i->next;
3524 kfree(i);
3525 }
1da177e4
LT
3526
3527done:
3528 rtnl_unlock();
3529 return err;
3530}
3531
0fb375fb 3532static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
3533{
3534 struct packet_mclist *ml, **mlp;
3535
3536 rtnl_lock();
3537
3538 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3539 if (ml->ifindex == mreq->mr_ifindex &&
3540 ml->type == mreq->mr_type &&
3541 ml->alen == mreq->mr_alen &&
3542 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3543 if (--ml->count == 0) {
3544 struct net_device *dev;
3545 *mlp = ml->next;
ad959e76
ED
3546 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3547 if (dev)
1da177e4 3548 packet_dev_mc(dev, ml, -1);
1da177e4
LT
3549 kfree(ml);
3550 }
82f17091 3551 break;
1da177e4
LT
3552 }
3553 }
3554 rtnl_unlock();
82f17091 3555 return 0;
1da177e4
LT
3556}
3557
3558static void packet_flush_mclist(struct sock *sk)
3559{
3560 struct packet_sock *po = pkt_sk(sk);
3561 struct packet_mclist *ml;
3562
3563 if (!po->mclist)
3564 return;
3565
3566 rtnl_lock();
3567 while ((ml = po->mclist) != NULL) {
3568 struct net_device *dev;
3569
3570 po->mclist = ml->next;
ad959e76
ED
3571 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3572 if (dev != NULL)
1da177e4 3573 packet_dev_mc(dev, ml, -1);
1da177e4
LT
3574 kfree(ml);
3575 }
3576 rtnl_unlock();
3577}
1da177e4
LT
3578
3579static int
b7058842 3580packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1da177e4
LT
3581{
3582 struct sock *sk = sock->sk;
8dc41944 3583 struct packet_sock *po = pkt_sk(sk);
1da177e4
LT
3584 int ret;
3585
3586 if (level != SOL_PACKET)
3587 return -ENOPROTOOPT;
3588
69e3c75f 3589 switch (optname) {
1ce4f28b 3590 case PACKET_ADD_MEMBERSHIP:
1da177e4
LT
3591 case PACKET_DROP_MEMBERSHIP:
3592 {
0fb375fb
EB
3593 struct packet_mreq_max mreq;
3594 int len = optlen;
3595 memset(&mreq, 0, sizeof(mreq));
3596 if (len < sizeof(struct packet_mreq))
1da177e4 3597 return -EINVAL;
0fb375fb
EB
3598 if (len > sizeof(mreq))
3599 len = sizeof(mreq);
40d4e3df 3600 if (copy_from_user(&mreq, optval, len))
1da177e4 3601 return -EFAULT;
0fb375fb
EB
3602 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3603 return -EINVAL;
1da177e4
LT
3604 if (optname == PACKET_ADD_MEMBERSHIP)
3605 ret = packet_mc_add(sk, &mreq);
3606 else
3607 ret = packet_mc_drop(sk, &mreq);
3608 return ret;
3609 }
a2efcfa0 3610
1da177e4 3611 case PACKET_RX_RING:
69e3c75f 3612 case PACKET_TX_RING:
1da177e4 3613 {
f6fb8f10 3614 union tpacket_req_u req_u;
3615 int len;
1da177e4 3616
f6fb8f10 3617 switch (po->tp_version) {
3618 case TPACKET_V1:
3619 case TPACKET_V2:
3620 len = sizeof(req_u.req);
3621 break;
3622 case TPACKET_V3:
3623 default:
3624 len = sizeof(req_u.req3);
3625 break;
3626 }
3627 if (optlen < len)
1da177e4 3628 return -EINVAL;
f6fb8f10 3629 if (copy_from_user(&req_u.req, optval, len))
1da177e4 3630 return -EFAULT;
f6fb8f10 3631 return packet_set_ring(sk, &req_u, 0,
3632 optname == PACKET_TX_RING);
1da177e4
LT
3633 }
3634 case PACKET_COPY_THRESH:
3635 {
3636 int val;
3637
40d4e3df 3638 if (optlen != sizeof(val))
1da177e4 3639 return -EINVAL;
40d4e3df 3640 if (copy_from_user(&val, optval, sizeof(val)))
1da177e4
LT
3641 return -EFAULT;
3642
3643 pkt_sk(sk)->copy_thresh = val;
3644 return 0;
3645 }
bbd6ef87
PM
3646 case PACKET_VERSION:
3647 {
3648 int val;
3649
3650 if (optlen != sizeof(val))
3651 return -EINVAL;
69e3c75f 3652 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
bbd6ef87
PM
3653 return -EBUSY;
3654 if (copy_from_user(&val, optval, sizeof(val)))
3655 return -EFAULT;
3656 switch (val) {
3657 case TPACKET_V1:
3658 case TPACKET_V2:
f6fb8f10 3659 case TPACKET_V3:
bbd6ef87
PM
3660 po->tp_version = val;
3661 return 0;
3662 default:
3663 return -EINVAL;
3664 }
3665 }
8913336a
PM
3666 case PACKET_RESERVE:
3667 {
3668 unsigned int val;
3669
3670 if (optlen != sizeof(val))
3671 return -EINVAL;
69e3c75f 3672 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
8913336a
PM
3673 return -EBUSY;
3674 if (copy_from_user(&val, optval, sizeof(val)))
3675 return -EFAULT;
3676 po->tp_reserve = val;
3677 return 0;
3678 }
69e3c75f
JB
3679 case PACKET_LOSS:
3680 {
3681 unsigned int val;
3682
3683 if (optlen != sizeof(val))
3684 return -EINVAL;
3685 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3686 return -EBUSY;
3687 if (copy_from_user(&val, optval, sizeof(val)))
3688 return -EFAULT;
3689 po->tp_loss = !!val;
3690 return 0;
3691 }
3692 case PACKET_AUXDATA:
3693 {
3694 int val;
3695
3696 if (optlen < sizeof(val))
3697 return -EINVAL;
3698 if (copy_from_user(&val, optval, sizeof(val)))
3699 return -EFAULT;
3700
3701 po->auxdata = !!val;
3702 return 0;
3703 }
3704 case PACKET_ORIGDEV:
3705 {
3706 int val;
3707
3708 if (optlen < sizeof(val))
3709 return -EINVAL;
3710 if (copy_from_user(&val, optval, sizeof(val)))
3711 return -EFAULT;
3712
3713 po->origdev = !!val;
3714 return 0;
3715 }
3716 case PACKET_VNET_HDR:
3717 {
3718 int val;
3719
3720 if (sock->type != SOCK_RAW)
3721 return -EINVAL;
3722 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3723 return -EBUSY;
3724 if (optlen < sizeof(val))
3725 return -EINVAL;
3726 if (copy_from_user(&val, optval, sizeof(val)))
3727 return -EFAULT;
3728
3729 po->has_vnet_hdr = !!val;
3730 return 0;
3731 }
3732 case PACKET_TIMESTAMP:
3733 {
3734 int val;
3735
3736 if (optlen != sizeof(val))
3737 return -EINVAL;
3738 if (copy_from_user(&val, optval, sizeof(val)))
3739 return -EFAULT;
3740
3741 po->tp_tstamp = val;
3742 return 0;
3743 }
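	/*
	 * po->tp_tstamp holds SOF_TIMESTAMPING_* flags from
	 * <linux/net_tstamp.h>. A userspace sketch requesting raw hardware
	 * timestamps in the ring frames (assumes the NIC and driver support
	 * hardware timestamping):
	 *
	 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
	 */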
3744 case PACKET_FANOUT:
3745 {
3746 int val;
3747
3748 if (optlen != sizeof(val))
3749 return -EINVAL;
3750 if (copy_from_user(&val, optval, sizeof(val)))
3751 return -EFAULT;
3752
3753 return fanout_add(sk, val & 0xffff, val >> 16);
3754 }
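	/*
	 * The fanout group id sits in the low 16 bits of the option value and
	 * the mode plus flags in the high 16 bits, matching the val & 0xffff /
	 * val >> 16 split above. A userspace sketch joining group 23 (an
	 * arbitrary id) with flow-hash load balancing:
	 *
	 *	int val = 23 | (PACKET_FANOUT_HASH << 16);
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
	 */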
3755 case PACKET_FANOUT_DATA:
3756 {
3757 if (!po->fanout)
3758 return -EINVAL;
3759
3760 return fanout_set_data(po, optval, optlen);
3761 }
3762 case PACKET_TX_HAS_OFF:
3763 {
3764 unsigned int val;
3765
3766 if (optlen != sizeof(val))
3767 return -EINVAL;
3768 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3769 return -EBUSY;
3770 if (copy_from_user(&val, optval, sizeof(val)))
3771 return -EFAULT;
3772 po->tp_tx_has_off = !!val;
3773 return 0;
3774 }
3775 case PACKET_QDISC_BYPASS:
3776 {
3777 int val;
3778
3779 if (optlen != sizeof(val))
3780 return -EINVAL;
3781 if (copy_from_user(&val, optval, sizeof(val)))
3782 return -EFAULT;
3783
3784 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3785 return 0;
3786 }
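	/*
	 * A userspace sketch enabling qdisc bypass, so frames sent through
	 * this socket skip the traffic-control layer and go straight to the
	 * driver:
	 *
	 *	int one = 1;
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
	 */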
3787 default:
3788 return -ENOPROTOOPT;
3789 }
3790}
3791
3792static int packet_getsockopt(struct socket *sock, int level, int optname,
3793 char __user *optval, int __user *optlen)
3794{
3795 int len;
c06fff6e 3796 int val, lv = sizeof(val);
3797 struct sock *sk = sock->sk;
3798 struct packet_sock *po = pkt_sk(sk);
c06fff6e 3799 void *data = &val;
ee80fbf3 3800 union tpacket_stats_u st;
a9b63918 3801 struct tpacket_rollover_stats rstats;
3802
3803 if (level != SOL_PACKET)
3804 return -ENOPROTOOPT;
3805
3806 if (get_user(len, optlen))
3807 return -EFAULT;
3808
3809 if (len < 0)
3810 return -EINVAL;
1ce4f28b 3811
69e3c75f 3812 switch (optname) {
1da177e4 3813 case PACKET_STATISTICS:
1da177e4 3814 spin_lock_bh(&sk->sk_receive_queue.lock);
3815 memcpy(&st, &po->stats, sizeof(st));
3816 memset(&po->stats, 0, sizeof(po->stats));
3817 spin_unlock_bh(&sk->sk_receive_queue.lock);
3818
f6fb8f10 3819 if (po->tp_version == TPACKET_V3) {
c06fff6e 3820 lv = sizeof(struct tpacket_stats_v3);
8bcdeaff 3821 st.stats3.tp_packets += st.stats3.tp_drops;
ee80fbf3 3822 data = &st.stats3;
f6fb8f10 3823 } else {
c06fff6e 3824 lv = sizeof(struct tpacket_stats);
8bcdeaff 3825 st.stats1.tp_packets += st.stats1.tp_drops;
ee80fbf3 3826 data = &st.stats1;
f6fb8f10 3827 }
ee80fbf3 3828
3829 break;
3830 case PACKET_AUXDATA:
8dc41944 3831 val = po->auxdata;
3832 break;
3833 case PACKET_ORIGDEV:
80feaacb 3834 val = po->origdev;
3835 break;
3836 case PACKET_VNET_HDR:
bfd5f4a3 3837 val = po->has_vnet_hdr;
1da177e4 3838 break;
bbd6ef87 3839 case PACKET_VERSION:
bbd6ef87 3840 val = po->tp_version;
3841 break;
3842 case PACKET_HDRLEN:
3843 if (len > sizeof(int))
3844 len = sizeof(int);
3845 if (copy_from_user(&val, optval, len))
3846 return -EFAULT;
3847 switch (val) {
3848 case TPACKET_V1:
3849 val = sizeof(struct tpacket_hdr);
3850 break;
3851 case TPACKET_V2:
3852 val = sizeof(struct tpacket2_hdr);
3853 break;
f6fb8f10 3854 case TPACKET_V3:
3855 val = sizeof(struct tpacket3_hdr);
3856 break;
3857 default:
3858 return -EINVAL;
3859 }
bbd6ef87 3860 break;
8913336a 3861 case PACKET_RESERVE:
8913336a 3862 val = po->tp_reserve;
8913336a 3863 break;
69e3c75f 3864 case PACKET_LOSS:
69e3c75f 3865 val = po->tp_loss;
69e3c75f 3866 break;
614f60fa 3867 case PACKET_TIMESTAMP:
614f60fa 3868 val = po->tp_tstamp;
614f60fa 3869 break;
dc99f600 3870 case PACKET_FANOUT:
3871 val = (po->fanout ?
3872 ((u32)po->fanout->id |
3873 ((u32)po->fanout->type << 16) |
3874 ((u32)po->fanout->flags << 24)) :
dc99f600 3875 0);
dc99f600 3876 break;
3877 case PACKET_ROLLOVER_STATS:
3878 if (!po->rollover)
3879 return -EINVAL;
3880 rstats.tp_all = atomic_long_read(&po->rollover->num);
3881 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3882 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3883 data = &rstats;
3884 lv = sizeof(rstats);
3885 break;
3886 case PACKET_TX_HAS_OFF:
3887 val = po->tp_tx_has_off;
3888 break;
3889 case PACKET_QDISC_BYPASS:
3890 val = packet_use_direct_xmit(po);
3891 break;
3892 default:
3893 return -ENOPROTOOPT;
3894 }
3895
3896 if (len > lv)
3897 len = lv;
3898 if (put_user(len, optlen))
3899 return -EFAULT;
3900 if (copy_to_user(optval, data, len))
3901 return -EFAULT;
8ae55f04 3902 return 0;
3903}
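/*
 * A userspace sketch reading the counters on a TPACKET_V3 socket. Because
 * the copy above also clears po->stats, each call returns the deltas since
 * the previous one:
 *
 *	struct tpacket_stats_v3 st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("packets %u, drops %u\n", st.tp_packets, st.tp_drops);
 */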
3904
3905
3906#ifdef CONFIG_COMPAT
3907static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3908 char __user *optval, unsigned int optlen)
3909{
3910 struct packet_sock *po = pkt_sk(sock->sk);
3911
3912 if (level != SOL_PACKET)
3913 return -ENOPROTOOPT;
3914
3915 if (optname == PACKET_FANOUT_DATA &&
3916 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
3917 optval = (char __user *)get_compat_bpf_fprog(optval);
3918 if (!optval)
3919 return -EFAULT;
3920 optlen = sizeof(struct sock_fprog);
3921 }
3922
3923 return packet_setsockopt(sock, level, optname, optval, optlen);
3924}
3925#endif
3926
3927static int packet_notifier(struct notifier_block *this,
3928 unsigned long msg, void *ptr)
3929{
3930 struct sock *sk;
351638e7 3931 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
c346dca1 3932 struct net *net = dev_net(dev);
1da177e4 3933
808f5114 3934 rcu_read_lock();
b67bfe0d 3935 sk_for_each_rcu(sk, &net->packet.sklist) {
3936 struct packet_sock *po = pkt_sk(sk);
3937
3938 switch (msg) {
3939 case NETDEV_UNREGISTER:
1da177e4 3940 if (po->mclist)
82f17091 3941 packet_dev_mclist_delete(dev, &po->mclist);
3942 /* fallthrough */
3943
3944 case NETDEV_DOWN:
3945 if (dev->ifindex == po->ifindex) {
3946 spin_lock(&po->bind_lock);
3947 if (po->running) {
ce06b03e 3948 __unregister_prot_hook(sk, false);
3949 sk->sk_err = ENETDOWN;
3950 if (!sock_flag(sk, SOCK_DEAD))
3951 sk->sk_error_report(sk);
3952 }
3953 if (msg == NETDEV_UNREGISTER) {
66e56cd4 3954 packet_cached_dev_reset(po);
1da177e4 3955 po->ifindex = -1;
3956 if (po->prot_hook.dev)
3957 dev_put(po->prot_hook.dev);
3958 po->prot_hook.dev = NULL;
3959 }
3960 spin_unlock(&po->bind_lock);
3961 }
3962 break;
3963 case NETDEV_UP:
808f5114 3964 if (dev->ifindex == po->ifindex) {
3965 spin_lock(&po->bind_lock);
3966 if (po->num)
3967 register_prot_hook(sk);
808f5114 3968 spin_unlock(&po->bind_lock);
1da177e4 3969 }
3970 break;
3971 }
3972 }
808f5114 3973 rcu_read_unlock();
3974 return NOTIFY_DONE;
3975}
3976
3977
3978static int packet_ioctl(struct socket *sock, unsigned int cmd,
3979 unsigned long arg)
3980{
3981 struct sock *sk = sock->sk;
3982
69e3c75f 3983 switch (cmd) {
3984 case SIOCOUTQ:
3985 {
3986 int amount = sk_wmem_alloc_get(sk);
31e6d363 3987
3988 return put_user(amount, (int __user *)arg);
3989 }
3990 case SIOCINQ:
3991 {
3992 struct sk_buff *skb;
3993 int amount = 0;
3994
3995 spin_lock_bh(&sk->sk_receive_queue.lock);
3996 skb = skb_peek(&sk->sk_receive_queue);
3997 if (skb)
3998 amount = skb->len;
3999 spin_unlock_bh(&sk->sk_receive_queue.lock);
4000 return put_user(amount, (int __user *)arg);
4001 }
4002 case SIOCGSTAMP:
4003 return sock_get_timestamp(sk, (struct timeval __user *)arg);
4004 case SIOCGSTAMPNS:
4005 return sock_get_timestampns(sk, (struct timespec __user *)arg);
1ce4f28b 4006
1da177e4 4007#ifdef CONFIG_INET
4008 case SIOCADDRT:
4009 case SIOCDELRT:
4010 case SIOCDARP:
4011 case SIOCGARP:
4012 case SIOCSARP:
4013 case SIOCGIFADDR:
4014 case SIOCSIFADDR:
4015 case SIOCGIFBRDADDR:
4016 case SIOCSIFBRDADDR:
4017 case SIOCGIFNETMASK:
4018 case SIOCSIFNETMASK:
4019 case SIOCGIFDSTADDR:
4020 case SIOCSIFDSTADDR:
4021 case SIOCSIFFLAGS:
40d4e3df 4022 return inet_dgram_ops.ioctl(sock, cmd, arg);
4023#endif
4024
4025 default:
4026 return -ENOIOCTLCMD;
4027 }
4028 return 0;
4029}
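/*
 * A userspace sketch: SIOCINQ reports the length of the packet at the head
 * of the receive queue (0 when it is empty), SIOCOUTQ the bytes still queued
 * for transmit:
 *
 *	int pending = 0;
 *
 *	ioctl(fd, SIOCINQ, &pending);
 */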
4030
40d4e3df 4031static unsigned int packet_poll(struct file *file, struct socket *sock,
4032 poll_table *wait)
4033{
4034 struct sock *sk = sock->sk;
4035 struct packet_sock *po = pkt_sk(sk);
4036 unsigned int mask = datagram_poll(file, sock, wait);
4037
4038 spin_lock_bh(&sk->sk_receive_queue.lock);
69e3c75f 4039 if (po->rx_ring.pg_vec) {
f6fb8f10 4040 if (!packet_previous_rx_frame(po, &po->rx_ring,
4041 TP_STATUS_KERNEL))
4042 mask |= POLLIN | POLLRDNORM;
4043 }
2ccdbaa6 4044 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
54d7c01d 4045 po->pressure = 0;
1da177e4 4046 spin_unlock_bh(&sk->sk_receive_queue.lock);
4047 spin_lock_bh(&sk->sk_write_queue.lock);
4048 if (po->tx_ring.pg_vec) {
4049 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4050 mask |= POLLOUT | POLLWRNORM;
4051 }
4052 spin_unlock_bh(&sk->sk_write_queue.lock);
4053 return mask;
4054}
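/*
 * A userspace sketch of waiting on the rings: POLLIN means the next RX frame
 * has been handed to user space, POLLOUT that a TX slot is free:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 */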
4055
4056
4057/* Dirty? Well, I still have not learned a better way to account
4058 * for user mmaps.
4059 */
4060
4061static void packet_mm_open(struct vm_area_struct *vma)
4062{
4063 struct file *file = vma->vm_file;
40d4e3df 4064 struct socket *sock = file->private_data;
1da177e4 4065 struct sock *sk = sock->sk;
1ce4f28b 4066
4067 if (sk)
4068 atomic_inc(&pkt_sk(sk)->mapped);
4069}
4070
4071static void packet_mm_close(struct vm_area_struct *vma)
4072{
4073 struct file *file = vma->vm_file;
40d4e3df 4074 struct socket *sock = file->private_data;
1da177e4 4075 struct sock *sk = sock->sk;
1ce4f28b 4076
4077 if (sk)
4078 atomic_dec(&pkt_sk(sk)->mapped);
4079}
4080
f0f37e2f 4081static const struct vm_operations_struct packet_mmap_ops = {
4082 .open = packet_mm_open,
4083 .close = packet_mm_close,
4084};
4085
4086static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4087 unsigned int len)
4088{
4089 int i;
4090
4ebf0ae2 4091 for (i = 0; i < len; i++) {
0e3125c7 4092 if (likely(pg_vec[i].buffer)) {
c56b4d90 4093 if (is_vmalloc_addr(pg_vec[i].buffer))
4094 vfree(pg_vec[i].buffer);
4095 else
4096 free_pages((unsigned long)pg_vec[i].buffer,
4097 order);
4098 pg_vec[i].buffer = NULL;
4099 }
4100 }
4101 kfree(pg_vec);
4102}
4103
eea49cc9 4104static char *alloc_one_pg_vec_page(unsigned long order)
4ebf0ae2 4105{
f0d4eb29 4106 char *buffer;
4107 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4108 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4109
4110 buffer = (char *) __get_free_pages(gfp_flags, order);
4111 if (buffer)
4112 return buffer;
4113
f0d4eb29 4114 /* __get_free_pages failed, fall back to vmalloc */
bbce5a59 4115 buffer = vzalloc((1 << order) * PAGE_SIZE);
4116 if (buffer)
4117 return buffer;
4118
f0d4eb29 4119 /* vmalloc failed, let's dig into swap here */
0e3125c7 4120 gfp_flags &= ~__GFP_NORETRY;
f0d4eb29 4121 buffer = (char *) __get_free_pages(gfp_flags, order);
4122 if (buffer)
4123 return buffer;
4124
f0d4eb29 4125 /* complete and utter failure */
0e3125c7 4126 return NULL;
4127}
4128
0e3125c7 4129static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4130{
4131 unsigned int block_nr = req->tp_block_nr;
0e3125c7 4132 struct pgv *pg_vec;
4133 int i;
4134
0e3125c7 4135 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4136 if (unlikely(!pg_vec))
4137 goto out;
4138
4139 for (i = 0; i < block_nr; i++) {
c56b4d90 4140 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
0e3125c7 4141 if (unlikely(!pg_vec[i].buffer))
4142 goto out_free_pgvec;
4143 }
4144
4145out:
4146 return pg_vec;
4147
4148out_free_pgvec:
4149 free_pg_vec(pg_vec, order, block_nr);
4150 pg_vec = NULL;
4151 goto out;
4152}
1da177e4 4153
f6fb8f10 4154static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
69e3c75f 4155 int closing, int tx_ring)
1da177e4 4156{
0e3125c7 4157 struct pgv *pg_vec = NULL;
1da177e4 4158 struct packet_sock *po = pkt_sk(sk);
0e11c91e 4159 int was_running, order = 0;
4160 struct packet_ring_buffer *rb;
4161 struct sk_buff_head *rb_queue;
0e11c91e 4162 __be16 num;
f6fb8f10 4163 int err = -EINVAL;
4164 /* Alias kept to keep code churn minimal */
4165 struct tpacket_req *req = &req_u->req;
4166
4167 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4168 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
6ae81ced 4169 net_warn_ratelimited("Tx-ring is not supported.\n");
f6fb8f10 4170 goto out;
4171 }
1ce4f28b 4172
4173 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4174 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
1da177e4 4175
4176 err = -EBUSY;
4177 if (!closing) {
4178 if (atomic_read(&po->mapped))
4179 goto out;
b0138408 4180 if (packet_read_pending(rb))
4181 goto out;
4182 }
1da177e4 4183
4184 if (req->tp_block_nr) {
4185 /* Sanity tests and some calculations */
4186 err = -EBUSY;
4187 if (unlikely(rb->pg_vec))
4188 goto out;
1da177e4 4189
4190 switch (po->tp_version) {
4191 case TPACKET_V1:
4192 po->tp_hdrlen = TPACKET_HDRLEN;
4193 break;
4194 case TPACKET_V2:
4195 po->tp_hdrlen = TPACKET2_HDRLEN;
4196 break;
f6fb8f10 4197 case TPACKET_V3:
4198 po->tp_hdrlen = TPACKET3_HDRLEN;
4199 break;
4200 }
4201
69e3c75f 4202 err = -EINVAL;
4ebf0ae2 4203 if (unlikely((int)req->tp_block_size <= 0))
69e3c75f 4204 goto out;
90836b67 4205 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
69e3c75f 4206 goto out;
4207 if (po->tp_version >= TPACKET_V3 &&
4208 (int)(req->tp_block_size -
4209 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
4210 goto out;
8913336a 4211 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4212 po->tp_reserve))
4213 goto out;
4ebf0ae2 4214 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
69e3c75f 4215 goto out;
1da177e4 4216
4217 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4218 if (unlikely(rb->frames_per_block == 0))
4219 goto out;
4220 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4221 req->tp_frame_nr))
4222 goto out;
4223
4224 err = -ENOMEM;
4225 order = get_order(req->tp_block_size);
4226 pg_vec = alloc_pg_vec(req, order);
4227 if (unlikely(!pg_vec))
1da177e4 4228 goto out;
f6fb8f10 4229 switch (po->tp_version) {
4230 case TPACKET_V3:
4231 /* Transmit path is not supported. We checked
4232 * it above but just being paranoid
4233 */
4234 if (!tx_ring)
e8e85cc5 4235 init_prb_bdqc(po, rb, pg_vec, req_u);
d7cf0c34 4236 break;
f6fb8f10 4237 default:
4238 break;
4239 }
4240 }
4241 /* Done */
4242 else {
4243 err = -EINVAL;
4ebf0ae2 4244 if (unlikely(req->tp_frame_nr))
69e3c75f 4245 goto out;
4246 }
4247
4248 lock_sock(sk);
4249
4250 /* Detach socket from network */
4251 spin_lock(&po->bind_lock);
4252 was_running = po->running;
4253 num = po->num;
4254 if (was_running) {
1da177e4 4255 po->num = 0;
ce06b03e 4256 __unregister_prot_hook(sk, false);
4257 }
4258 spin_unlock(&po->bind_lock);
1ce4f28b 4259
4260 synchronize_net();
4261
4262 err = -EBUSY;
905db440 4263 mutex_lock(&po->pg_vec_lock);
4264 if (closing || atomic_read(&po->mapped) == 0) {
4265 err = 0;
69e3c75f 4266 spin_lock_bh(&rb_queue->lock);
c053fd96 4267 swap(rb->pg_vec, pg_vec);
4268 rb->frame_max = (req->tp_frame_nr - 1);
4269 rb->head = 0;
4270 rb->frame_size = req->tp_frame_size;
4271 spin_unlock_bh(&rb_queue->lock);
4272
4273 swap(rb->pg_vec_order, order);
4274 swap(rb->pg_vec_len, req->tp_block_nr);
4275
4276 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4277 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4278 tpacket_rcv : packet_rcv;
4279 skb_queue_purge(rb_queue);
1da177e4 4280 if (atomic_read(&po->mapped))
4281 pr_err("packet_mmap: vma is busy: %d\n",
4282 atomic_read(&po->mapped));
1da177e4 4283 }
905db440 4284 mutex_unlock(&po->pg_vec_lock);
4285
4286 spin_lock(&po->bind_lock);
ce06b03e 4287 if (was_running) {
1da177e4 4288 po->num = num;
ce06b03e 4289 register_prot_hook(sk);
4290 }
4291 spin_unlock(&po->bind_lock);
f6fb8f10 4292 if (closing && (po->tp_version > TPACKET_V2)) {
4293 /* Because we don't support block-based V3 on tx-ring */
4294 if (!tx_ring)
73d0fcf2 4295 prb_shutdown_retire_blk_timer(po, rb_queue);
f6fb8f10 4296 }
4297 release_sock(sk);
4298
4299 if (pg_vec)
4300 free_pg_vec(pg_vec, order, req->tp_block_nr);
4301out:
4302 return err;
4303}
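/*
 * A parameter set that satisfies the sanity checks above for TPACKET_V2 on a
 * 4 KiB page system (illustrative values only): tp_block_size is a multiple
 * of PAGE_SIZE, tp_frame_size a multiple of TPACKET_ALIGNMENT and at least
 * tp_hdrlen + tp_reserve, and tp_frame_nr equals
 * (tp_block_size / tp_frame_size) * tp_block_nr:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 256,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = (4096 / 2048) * 256,	// 512 frames
 *	};
 */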
4304
4305static int packet_mmap(struct file *file, struct socket *sock,
4306 struct vm_area_struct *vma)
4307{
4308 struct sock *sk = sock->sk;
4309 struct packet_sock *po = pkt_sk(sk);
4310 unsigned long size, expected_size;
4311 struct packet_ring_buffer *rb;
4312 unsigned long start;
4313 int err = -EINVAL;
4314 int i;
4315
4316 if (vma->vm_pgoff)
4317 return -EINVAL;
4318
905db440 4319 mutex_lock(&po->pg_vec_lock);
4320
4321 expected_size = 0;
4322 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4323 if (rb->pg_vec) {
4324 expected_size += rb->pg_vec_len
4325 * rb->pg_vec_pages
4326 * PAGE_SIZE;
4327 }
4328 }
4329
4330 if (expected_size == 0)
1da177e4 4331 goto out;
4332
4333 size = vma->vm_end - vma->vm_start;
4334 if (size != expected_size)
4335 goto out;
4336
1da177e4 4337 start = vma->vm_start;
4338 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4339 if (rb->pg_vec == NULL)
4340 continue;
4341
4342 for (i = 0; i < rb->pg_vec_len; i++) {
4343 struct page *page;
4344 void *kaddr = rb->pg_vec[i].buffer;
4345 int pg_num;
4346
4347 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4348 page = pgv_to_page(kaddr);
4349 err = vm_insert_page(vma, start, page);
4350 if (unlikely(err))
4351 goto out;
4352 start += PAGE_SIZE;
0e3125c7 4353 kaddr += PAGE_SIZE;
69e3c75f 4354 }
4ebf0ae2 4355 }
1da177e4 4356 }
69e3c75f 4357
4ebf0ae2 4358 atomic_inc(&po->mapped);
4359 vma->vm_ops = &packet_mmap_ops;
4360 err = 0;
4361
4362out:
905db440 4363 mutex_unlock(&po->pg_vec_lock);
4364 return err;
4365}
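/*
 * A userspace sketch of consuming one TPACKET_V2 RX frame after mmap(),
 * assuming only an RX ring has been configured so the mapping length is
 * exactly req.tp_block_size * req.tp_block_nr (with both rings, the RX ring
 * is mapped first, then the TX ring, and the total must match expected_size
 * computed above):
 *
 *	size_t len = req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket2_hdr *hdr = ring;		// frame 0
 *
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// packet data starts at (char *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;	// hand the frame back
 *	}
 */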
1da177e4 4366
90ddc4f0 4367static const struct proto_ops packet_ops_spkt = {
4368 .family = PF_PACKET,
4369 .owner = THIS_MODULE,
4370 .release = packet_release,
4371 .bind = packet_bind_spkt,
4372 .connect = sock_no_connect,
4373 .socketpair = sock_no_socketpair,
4374 .accept = sock_no_accept,
4375 .getname = packet_getname_spkt,
4376 .poll = datagram_poll,
4377 .ioctl = packet_ioctl,
4378 .listen = sock_no_listen,
4379 .shutdown = sock_no_shutdown,
4380 .setsockopt = sock_no_setsockopt,
4381 .getsockopt = sock_no_getsockopt,
4382 .sendmsg = packet_sendmsg_spkt,
4383 .recvmsg = packet_recvmsg,
4384 .mmap = sock_no_mmap,
4385 .sendpage = sock_no_sendpage,
4386};
1da177e4 4387
90ddc4f0 4388static const struct proto_ops packet_ops = {
4389 .family = PF_PACKET,
4390 .owner = THIS_MODULE,
4391 .release = packet_release,
4392 .bind = packet_bind,
4393 .connect = sock_no_connect,
4394 .socketpair = sock_no_socketpair,
4395 .accept = sock_no_accept,
1ce4f28b 4396 .getname = packet_getname,
1da177e4
LT
4397 .poll = packet_poll,
4398 .ioctl = packet_ioctl,
4399 .listen = sock_no_listen,
4400 .shutdown = sock_no_shutdown,
4401 .setsockopt = packet_setsockopt,
4402 .getsockopt = packet_getsockopt,
4403#ifdef CONFIG_COMPAT
4404 .compat_setsockopt = compat_packet_setsockopt,
4405#endif
4406 .sendmsg = packet_sendmsg,
4407 .recvmsg = packet_recvmsg,
4408 .mmap = packet_mmap,
4409 .sendpage = sock_no_sendpage,
4410};
4411
ec1b4cf7 4412static const struct net_proto_family packet_family_ops = {
4413 .family = PF_PACKET,
4414 .create = packet_create,
4415 .owner = THIS_MODULE,
4416};
4417
4418static struct notifier_block packet_netdev_notifier = {
40d4e3df 4419 .notifier_call = packet_notifier,
4420};
4421
4422#ifdef CONFIG_PROC_FS
4423
4424static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
808f5114 4425 __acquires(RCU)
1da177e4 4426{
e372c414 4427 struct net *net = seq_file_net(seq);
808f5114 4428
4429 rcu_read_lock();
4430 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4431}
4432
4433static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4434{
1bf40954 4435 struct net *net = seq_file_net(seq);
808f5114 4436 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4437}
4438
4439static void packet_seq_stop(struct seq_file *seq, void *v)
808f5114 4440 __releases(RCU)
1da177e4 4441{
808f5114 4442 rcu_read_unlock();
4443}
4444
1ce4f28b 4445static int packet_seq_show(struct seq_file *seq, void *v)
4446{
4447 if (v == SEQ_START_TOKEN)
4448 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4449 else {
b7ceabd9 4450 struct sock *s = sk_entry(v);
4451 const struct packet_sock *po = pkt_sk(s);
4452
4453 seq_printf(seq,
71338aa7 4454 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4455 s,
4456 atomic_read(&s->sk_refcnt),
4457 s->sk_type,
4458 ntohs(po->num),
4459 po->ifindex,
4460 po->running,
4461 atomic_read(&s->sk_rmem_alloc),
a7cb5a49 4462 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
40d4e3df 4463 sock_i_ino(s));
4464 }
4465
4466 return 0;
4467}
4468
56b3d975 4469static const struct seq_operations packet_seq_ops = {
4470 .start = packet_seq_start,
4471 .next = packet_seq_next,
4472 .stop = packet_seq_stop,
4473 .show = packet_seq_show,
4474};
4475
4476static int packet_seq_open(struct inode *inode, struct file *file)
4477{
4478 return seq_open_net(inode, file, &packet_seq_ops,
4479 sizeof(struct seq_net_private));
4480}
4481
da7071d7 4482static const struct file_operations packet_seq_fops = {
4483 .owner = THIS_MODULE,
4484 .open = packet_seq_open,
4485 .read = seq_read,
4486 .llseek = seq_lseek,
e372c414 4487 .release = seq_release_net,
4488};
4489
4490#endif
4491
2c8c1e72 4492static int __net_init packet_net_init(struct net *net)
d12d01d6 4493{
0fa7fa98 4494 mutex_init(&net->packet.sklist_lock);
2aaef4e4 4495 INIT_HLIST_HEAD(&net->packet.sklist);
d12d01d6 4496
d4beaa66 4497 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4498 return -ENOMEM;
4499
4500 return 0;
4501}
4502
2c8c1e72 4503static void __net_exit packet_net_exit(struct net *net)
d12d01d6 4504{
ece31ffd 4505 remove_proc_entry("packet", net->proc_net);
4506}
4507
4508static struct pernet_operations packet_net_ops = {
4509 .init = packet_net_init,
4510 .exit = packet_net_exit,
4511};
4512
4513
4514static void __exit packet_exit(void)
4515{
1da177e4 4516 unregister_netdevice_notifier(&packet_netdev_notifier);
d12d01d6 4517 unregister_pernet_subsys(&packet_net_ops);
4518 sock_unregister(PF_PACKET);
4519 proto_unregister(&packet_proto);
4520}
4521
4522static int __init packet_init(void)
4523{
4524 int rc = proto_register(&packet_proto, 0);
4525
4526 if (rc != 0)
4527 goto out;
4528
4529 sock_register(&packet_family_ops);
d12d01d6 4530 register_pernet_subsys(&packet_net_ops);
1da177e4 4531 register_netdevice_notifier(&packet_netdev_notifier);
4532out:
4533 return rc;
4534}
4535
4536module_init(packet_init);
4537module_exit(packet_exit);
4538MODULE_LICENSE("GPL");
4539MODULE_ALIAS_NETPROTO(PF_PACKET);