/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>	/* cpu_to_be64() for the 64-bit timestamps */

#define NFQNL_QMAX_DEFAULT 1024

#if 0
#define QDEBUG(x, args ...)	printk(KERN_DEBUG "%s(%d):%s(): " x,	   \
					__FILE__, __LINE__, __FUNCTION__,  \
					## args)
#else
#define QDEBUG(x, ...)
#endif

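/*
 * One queued packet: the skb itself plus the nf_info needed to reinject
 * it, tagged with the per-instance packet id that userspace echoes back
 * in its verdict.
 */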
struct nfqnl_queue_entry {
	struct list_head list;
	struct nf_info *info;
	struct sk_buff *skb;
	unsigned int id;
};

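/*
 * Per-queue state: one instance exists for each queue number bound by a
 * userspace process (identified by its netlink pid).  The spinlock
 * protects the packet list and the counters; the instance itself lives
 * in the global hash table guarded by instances_lock.
 */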
struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	atomic_t id_sequence;			/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

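/*
 * The timestamp attribute carries 64-bit values in network (big endian)
 * byte order, but the kernel headers provide no htonll(); supply a
 * local helper on top of the generic byte-order macros.
 */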
static u_int64_t htonll(u_int64_t in)
{
	return cpu_to_be64(in);
}

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];

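/* derive a bucket index from both bytes of the 16-bit queue number */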
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
	struct nfqnl_instance *inst;

	read_lock_bh(&instances_lock);
	inst = __instance_lookup(queue_num);
	read_unlock_bh(&instances_lock);

	return inst;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;

	QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(queue_num)) {
		inst = NULL;
		QDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	memset(inst, 0, sizeof(*inst));
	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	/* struct nfattr has a 16-bit nfa_len, so this is the upper bound */
	inst->copy_range = 0xffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	atomic_set(&inst->id_sequence, 0);
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(queue_num)]);

	write_unlock_bh(&instances_lock);

	QDEBUG("successfully created new instance\n");

	return inst;

out_free:
	kfree(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}

static void nfqnl_flush(struct nfqnl_instance *queue, int verdict);

static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	QDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->queue_num);
	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending skbs from the queue */
	nfqnl_flush(inst, NF_DROP);

	/* and finally free the data structure */
	kfree(inst);

	module_put(THIS_MODULE);
}

static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 1);
}

static void
issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
{
	QDEBUG("entering for entry %p, verdict %u\n", entry, verdict);

	/* The TCP input path (and probably other parts of the stack)
	 * assume they are called from softirq context, not from syscall
	 * context as issue_verdict() is; e.g. the TCP input path can
	 * deadlock with locks taken from timer softirqs.  We therefore
	 * emulate softirq context with local_bh_disable(). */

	local_bh_disable();
	nf_reinject(entry->skb, entry->info, verdict);
	local_bh_enable();

	kfree(entry);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue,
		struct nfqnl_queue_entry *entry)
{
	list_add(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct nfqnl_queue_entry *
__find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
	     unsigned long data)
{
	struct list_head *p;

	list_for_each_prev(p, &queue->queue_list) {
		struct nfqnl_queue_entry *entry =
			list_entry(p, struct nfqnl_queue_entry, list);

		if (!cmpfn || cmpfn(entry, data))
			return entry;
	}
	return NULL;
}

static inline void
__dequeue_entry(struct nfqnl_instance *q, struct nfqnl_queue_entry *entry)
{
	list_del(&entry->list);
	q->queue_total--;
}

static inline struct nfqnl_queue_entry *
__find_dequeue_entry(struct nfqnl_instance *queue,
		     nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nfqnl_queue_entry *entry;

	entry = __find_entry(queue, cmpfn, data);
	if (entry == NULL)
		return NULL;

	__dequeue_entry(queue, entry);
	return entry;
}

static inline void
__nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
	struct nfqnl_queue_entry *entry;

	while ((entry = __find_dequeue_entry(queue, NULL, 0)))
		issue_verdict(entry, verdict);
}

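/*
 * Apply a copy mode requested by userspace: COPY_NONE and COPY_META
 * carry no payload, COPY_PACKET copies up to 'range' bytes of it.
 * Caller must hold queue->lock.
 */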
static inline int
__nfqnl_set_mode(struct nfqnl_instance *queue,
		 unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nfattr which has 16bit nfa_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}
	return status;
}

static struct nfqnl_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue,
		   nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nfqnl_queue_entry *entry;

	spin_lock_bh(&queue->lock);
	entry = __find_dequeue_entry(queue, cmpfn, data);
	spin_unlock_bh(&queue->lock);

	return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
	spin_lock_bh(&queue->lock);
	__nfqnl_flush(queue, verdict);
	spin_unlock_bh(&queue->lock);
}

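/*
 * Build the NFQNL_MSG_PACKET netlink message that announces a queued
 * packet to userspace.  The resulting message looks roughly like this
 * (attributes are present only when the corresponding data exists):
 *
 *	struct nlmsghdr
 *	struct nfgenmsg		(family, version, queue number)
 *	NFQA_PACKET_HDR		(packet id, hw protocol, hook)
 *	NFQA_IFINDEX_INDEV	(u_int32_t, network order)
 *	NFQA_IFINDEX_OUTDEV	(u_int32_t, network order)
 *	NFQA_MARK		(u_int32_t, network order)
 *	NFQA_HWADDR		(struct nfqnl_msg_packet_hw)
 *	NFQA_TIMESTAMP		(struct nfqnl_msg_packet_timestamp)
 *	NFQA_PAYLOAD		(up to copy_range bytes of packet data)
 *
 * On error, NULL is returned and *errp is set.
 */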
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nfqnl_queue_entry *entry, int *errp)
{
	unsigned char *old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	unsigned int tmp_uint;

	QDEBUG("entered\n");

	/* all macros expand to constant values at compile time */
	size =    NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_hdr))
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* mark */
		+ NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_hw))
		+ NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_timestamp));

	spin_lock_bh(&queue->lock);

	switch (queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if (queue->copy_range == 0
		    || queue->copy_range > entry->skb->len)
			data_len = entry->skb->len;
		else
			data_len = queue->copy_range;

		size += NLMSG_SPACE(data_len);
		break;

	default:
		*errp = -EINVAL;
		spin_unlock_bh(&queue->lock);
		return NULL;
	}

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->info->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id = htonl(entry->id);
	/* skb->protocol is already in network byte order */
	pmsg.hw_protocol = entry->skb->protocol;
	pmsg.hook = entry->info->hook;

	NFA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (entry->info->indev) {
		tmp_uint = htonl(entry->info->indev->ifindex);
		NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
	}

	if (entry->info->outdev) {
		tmp_uint = htonl(entry->info->outdev->ifindex);
		NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
	}

	if (entry->skb->nfmark) {
		tmp_uint = htonl(entry->skb->nfmark);
		NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
	}

	if (entry->info->indev && entry->skb->dev
	    && entry->skb->dev->hard_header_parse) {
		struct nfqnl_msg_packet_hw phw;

		phw.hw_addrlen =
			entry->skb->dev->hard_header_parse(entry->skb,
							   phw.hw_addr);
		phw.hw_addrlen = htons(phw.hw_addrlen);
		NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
	}

	if (entry->skb->stamp.tv_sec) {
		struct nfqnl_msg_packet_timestamp ts;

		ts.sec = htonll(entry->skb->stamp.tv_sec);
		ts.usec = htonll(entry->skb->stamp.tv_usec);

		NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nfattr *nfa;
		int size = NFA_LENGTH(data_len);

		if (skb_tailroom(skb) < (int)NFA_SPACE(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size));
		nfa->nfa_type = NFQA_PAYLOAD;
		nfa->nfa_len = size;

		if (skb_copy_bits(entry->skb, 0, NFA_DATA(nfa), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nfattr_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	if (net_ratelimit())
		printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

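/*
 * Queue handler called from nf_queue for every packet that an NFQUEUE
 * rule directs to 'queuenum': build the netlink message, send it to the
 * bound userspace process and park the packet on the instance's list
 * until the verdict arrives.
 */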
static int
nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
		     unsigned int queuenum, void *data)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	struct nfqnl_queue_entry *entry;

	QDEBUG("entered\n");

	queue = instance_lookup(queuenum);
	if (!queue) {
		QDEBUG("no queue instance matching\n");
		return -EINVAL;
	}

	if (queue->copy_mode == NFQNL_COPY_NONE) {
		QDEBUG("mode COPY_NONE, aborting\n");
		return -EAGAIN;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL) {
		if (net_ratelimit())
			printk(KERN_ERR
				"nf_queue: OOM in nfqnl_enqueue_packet()\n");
		return -ENOMEM;
	}

	entry->info = info;
	entry->skb = skb;
	entry->id = atomic_inc_return(&queue->id_sequence);

	nskb = nfqnl_build_packet_message(queue, entry, &status);
	if (nskb == NULL)
		goto err_out_free;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue->lock);

err_out_free:
	kfree(entry);
	return status;
}

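/*
 * Replace the payload of a queued packet with the data userspace sent
 * along with its verdict, shrinking or growing the skb as needed.
 */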
static int
nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
{
	int diff;

	diff = data_len - e->skb->len;
	if (diff < 0)
		skb_trim(e->skb, data_len);
	else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			struct sk_buff *newskb;

			newskb = skb_copy_expand(e->skb,
						 skb_headroom(e->skb),
						 diff,
						 GFP_ATOMIC);
			if (newskb == NULL) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			if (e->skb->sk)
				skb_set_owner_w(newskb, e->skb->sk);
			kfree_skb(e->skb);
			e->skb = newskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(&e->skb, data_len))
		return -ENOMEM;
	memcpy(e->skb->data, data, data_len);

	return 0;
}

static inline int
id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
{
	return (id == e->id);
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue->lock);
	status = __nfqnl_set_mode(queue, mode, range);
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
{
	if (entry->info->indev)
		if (entry->info->indev->ifindex == ifindex)
			return 1;

	if (entry->info->outdev)
		if (entry->info->outdev->ifindex == ifindex)
			return 1;

	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	QDEBUG("entering for ifindex %u\n", ifindex);

	/* It only looks as if we hold the readlock for far too long
	 * (issue_verdict(), nf_reinject(), ...) - but we only ever issue
	 * NF_DROP here, which is processed directly in nf_reinject(). */
	read_lock_bh(&instances_lock);

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry(inst, tmp, head, hlist) {
			struct nfqnl_queue_entry *entry;
			while ((entry = find_dequeue_entry(inst, dev_cmp,
							   ifindex)) != NULL)
				issue_verdict(entry, NF_DROP);
		}
	}

	read_unlock_bh(&instances_lock);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if (n->pid == inst->peer_pid)
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

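/*
 * Handle an NFQNL_MSG_VERDICT message from userspace.  The message
 * carries an NFQA_VERDICT_HDR attribute with the verdict (NF_ACCEPT,
 * NF_DROP, ...) and the packet id announced earlier, optionally
 * followed by NFQA_PAYLOAD (replacement packet data) and NFQA_MARK
 * (a new nfmark for the packet).
 */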
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nfqnl_queue_entry *entry;

	queue = instance_lookup(queue_num);
	if (!queue)
		return -ENODEV;

	if (queue->peer_pid != NETLINK_CB(skb).pid)
		return -EPERM;

	if (!nfqa[NFQA_VERDICT_HDR-1])
		return -EINVAL;

	vhdr = NFA_DATA(nfqa[NFQA_VERDICT_HDR-1]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
		return -EINVAL;

	entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	if (nfqa[NFQA_PAYLOAD-1]) {
		if (nfqnl_mangle(NFA_DATA(nfqa[NFQA_PAYLOAD-1]),
				 NFA_PAYLOAD(nfqa[NFQA_PAYLOAD-1]), entry) < 0)
			verdict = NF_DROP;
	}

	/* set the mark on the queued packet, not on the netlink skb */
	if (nfqa[NFQA_MARK-1])
		entry->skb->nfmark = ntohl(*(u_int32_t *)
					   NFA_DATA(nfqa[NFQA_MARK-1]));

	issue_verdict(entry, verdict);
	return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
	return -ENOTSUPP;
}

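/*
 * Handle an NFQNL_MSG_CONFIG message.  A typical userspace session,
 * sketched in raw nfnetlink terms (libnetfilter_queue wraps this):
 *
 *	NFQNL_CFG_CMD_PF_BIND	register this module as the queue
 *				handler for one protocol family
 *	NFQNL_CFG_CMD_BIND	create an instance for a queue number,
 *				owned by the sender's netlink pid
 *	NFQA_CFG_PARAMS		set copy mode and copy range
 *	...
 *	NFQNL_CFG_CMD_UNBIND and NFQNL_CFG_CMD_PF_UNBIND on shutdown
 */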
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;

	QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

	queue = instance_lookup(queue_num);
	if (nfqa[NFQA_CFG_CMD-1]) {
		struct nfqnl_msg_config_cmd *cmd;
		cmd = NFA_DATA(nfqa[NFQA_CFG_CMD-1]);
		QDEBUG("found CFG_CMD\n");

		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue)
				return -EBUSY;

			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue)
				return -EINVAL;
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue)
				return -ENODEV;

			if (queue->peer_pid != NETLINK_CB(skb).pid)
				return -EPERM;

			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
			QDEBUG("registering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			return nf_register_queue_handler(ntohs(cmd->pf),
							 nfqnl_enqueue_packet,
							 NULL);
		case NFQNL_CFG_CMD_PF_UNBIND:
			QDEBUG("unregistering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			/* This is a bug and a feature:  we can unregister
			 * other handlers(!) */
			return nf_unregister_queue_handler(ntohs(cmd->pf));
		default:
			return -EINVAL;
		}
	} else {
		if (!queue) {
			QDEBUG("no config command, and no instance ENOENT\n");
			return -ENOENT;
		}

		if (queue->peer_pid != NETLINK_CB(skb).pid) {
			QDEBUG("no config command, and wrong pid\n");
			return -EPERM;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS-1]) {
		struct nfqnl_msg_config_params *params;
		params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);

		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	return 0;
}

static struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .cap_required = CAP_NET_ADMIN },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .cap_required = CAP_NET_ADMIN },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .cap_required = CAP_NET_ADMIN },
};

static struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.attr_count	= NFQA_MAX,
	.cb		= nfqnl_cb,
};

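/*
 * Shared setup/teardown path: init=1 registers the netlink notifier,
 * the nfnetlink subsystem and the netdevice notifier; init=0 tears
 * everything down again and also unregisters this module's queue
 * handler from all protocol families.
 */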
static int
init_or_cleanup(int init)
{
	int status = -ENOMEM;

	if (!init)
		goto cleanup;

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

cleanup:
	nf_unregister_queue_handlers(nfqnl_enqueue_packet);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
	nfnetlink_subsys_unregister(&nfqnl_subsys);

cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static int __init init(void)
{
	return init_or_cleanup(1);
}

static void __exit fini(void)
{
	init_or_cleanup(0);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(init);
module_exit(fini);