[NETFILTER]: nf_queue: make queue_handler const
net/ipv4/netfilter/ip_queue.c
/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
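
/*
 * Userspace view, for orientation (an illustrative sketch only, using
 * the companion libipq library, which is not part of this file): a
 * process opens a NETLINK_FIREWALL socket, switches the queue into
 * packet-copy mode, then reads IPQM_PACKET messages and answers each
 * one with an IPQM_VERDICT.  Assuming libipq is available:
 *
 *	struct ipq_handle *h = ipq_create_handle(0, PF_INET);
 *	unsigned char buf[4096];
 *
 *	ipq_set_mode(h, IPQ_COPY_PACKET, sizeof(buf));
 *	for (;;) {
 *		if (ipq_read(h, buf, sizeof(buf), 0) <= 0)
 *			break;
 *		if (ipq_message_type(buf) == IPQM_PACKET) {
 *			ipq_packet_msg_t *m = ipq_get_packet(buf);
 *
 *			ipq_set_verdict(h, m->packet_id, NF_ACCEPT,
 *					0, NULL);
 *		}
 *	}
 *	ipq_destroy_handle(h);
 */
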
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/route.h>

#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"

struct ipq_queue_entry {
	struct list_head list;
	struct nf_info *info;
	struct sk_buff *skb;
};
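
/*
 * The address of an ipq_queue_entry doubles as the packet_id reported
 * to userspace (see ipq_build_packet_message() and id_cmp() below), so
 * a verdict message can be matched back to its queued skb.
 */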

typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);

static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped;
static unsigned int queue_user_dropped;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);

static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
{
	/* The TCP input path (and probably other bits) assumes it is
	 * called from softirq context, not from a syscall as
	 * ipq_issue_verdict is.  The TCP input path can deadlock with
	 * locks taken from the timer softirq, for example.  We
	 * therefore emulate softirq context with local_bh_disable(). */

	local_bh_disable();
	nf_reinject(entry->skb, entry->info, verdict);
	local_bh_enable();

	kfree(entry);
}

static inline void
__ipq_enqueue_entry(struct ipq_queue_entry *entry)
{
	list_add(&entry->list, &queue_list);
	queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct ipq_queue_entry *
__ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data)
{
	struct list_head *p;

	list_for_each_prev(p, &queue_list) {
		struct ipq_queue_entry *entry = list_entry(p,
					struct ipq_queue_entry, list);

		if (!cmpfn || cmpfn(entry, data))
			return entry;
	}
	return NULL;
}

static inline void
__ipq_dequeue_entry(struct ipq_queue_entry *entry)
{
	list_del(&entry->list);
	queue_total--;
}

static inline struct ipq_queue_entry *
__ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
	struct ipq_queue_entry *entry;

	entry = __ipq_find_entry(cmpfn, data);
	if (entry == NULL)
		return NULL;

	__ipq_dequeue_entry(entry);
	return entry;
}

static inline void
__ipq_flush(int verdict)
{
	struct ipq_queue_entry *entry;

	while ((entry = __ipq_find_dequeue_entry(NULL, 0)))
		ipq_issue_verdict(entry, verdict);
}

static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case IPQ_COPY_NONE:
	case IPQ_COPY_META:
		copy_mode = mode;
		copy_range = 0;
		break;

	case IPQ_COPY_PACKET:
		copy_mode = mode;
		copy_range = range;
		if (copy_range > 0xFFFF)
			copy_range = 0xFFFF;
		break;

	default:
		status = -EINVAL;
	}
	return status;
}

static inline void
__ipq_reset(void)
{
	peer_pid = 0;
	net_disable_timestamp();
	__ipq_set_mode(IPQ_COPY_NONE, 0);
	__ipq_flush(NF_DROP);
}

static struct ipq_queue_entry *
ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
	struct ipq_queue_entry *entry;

	write_lock_bh(&queue_lock);
	entry = __ipq_find_dequeue_entry(cmpfn, data);
	write_unlock_bh(&queue_lock);
	return entry;
}

static void
ipq_flush(int verdict)
{
	write_lock_bh(&queue_lock);
	__ipq_flush(verdict);
	write_unlock_bh(&queue_lock);
}

static struct sk_buff *
ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size = 0;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct ipq_packet_msg *pmsg;
	struct nlmsghdr *nlh;
	struct timeval tv;

	read_lock_bh(&queue_lock);

	switch (copy_mode) {
	case IPQ_COPY_META:
	case IPQ_COPY_NONE:
		size = NLMSG_SPACE(sizeof(*pmsg));
		data_len = 0;
		break;

	case IPQ_COPY_PACKET:
		if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
		     entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
		    (*errp = skb_checksum_help(entry->skb))) {
			read_unlock_bh(&queue_lock);
			return NULL;
		}
		if (copy_range == 0 || copy_range > entry->skb->len)
			data_len = entry->skb->len;
		else
			data_len = copy_range;

		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
		break;

	default:
		*errp = -EINVAL;
		read_unlock_bh(&queue_lock);
		return NULL;
	}

	read_unlock_bh(&queue_lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
	pmsg = NLMSG_DATA(nlh);
	memset(pmsg, 0, sizeof(*pmsg));

	pmsg->packet_id = (unsigned long)entry;
	pmsg->data_len = data_len;
	tv = ktime_to_timeval(entry->skb->tstamp);
	pmsg->timestamp_sec = tv.tv_sec;
	pmsg->timestamp_usec = tv.tv_usec;
	pmsg->mark = entry->skb->mark;
	pmsg->hook = entry->info->hook;
	pmsg->hw_protocol = entry->skb->protocol;

	if (entry->info->indev)
		strcpy(pmsg->indev_name, entry->info->indev->name);
	else
		pmsg->indev_name[0] = '\0';

	if (entry->info->outdev)
		strcpy(pmsg->outdev_name, entry->info->outdev->name);
	else
		pmsg->outdev_name[0] = '\0';

	if (entry->info->indev && entry->skb->dev) {
		pmsg->hw_type = entry->skb->dev->type;
		pmsg->hw_addrlen = dev_parse_header(entry->skb,
						    pmsg->hw_addr);
	}

	if (data_len)
		if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
			BUG();

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	printk(KERN_ERR "ip_queue: error creating packet message\n");
	return NULL;
}

static int
ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
		   unsigned int queuenum, void *data)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct ipq_queue_entry *entry;

	if (copy_mode == IPQ_COPY_NONE)
		return -EAGAIN;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL) {
		printk(KERN_ERR "ip_queue: OOM in ipq_enqueue_packet()\n");
		return -ENOMEM;
	}

	entry->info = info;
	entry->skb = skb;

	nskb = ipq_build_packet_message(entry, &status);
	if (nskb == NULL)
		goto err_out_free;

	write_lock_bh(&queue_lock);

	if (!peer_pid)
		goto err_out_free_nskb;

	if (queue_total >= queue_maxlen) {
		queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "ip_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n", queue_total,
			       queue_dropped);
		goto err_out_free_nskb;
	}

	/* netlink_unicast will either free the nskb or attach it to a socket */
	status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue_user_dropped++;
		goto err_out_unlock;
	}

	__ipq_enqueue_entry(entry);

	write_unlock_bh(&queue_lock);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	write_unlock_bh(&queue_lock);

err_out_free:
	kfree(entry);
	return status;
}

static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
{
	int diff;
	int err;
	struct iphdr *user_iph = (struct iphdr *)v->payload;

	if (v->data_len < sizeof(*user_iph))
		return 0;
	diff = v->data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, v->data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (v->data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "ip_queue: error "
				       "in mangle, dropping packet: %d\n", -err);
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, v->data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
	e->skb->ip_summed = CHECKSUM_NONE;

	return 0;
}
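
/*
 * Note on mangling: a verdict message may carry a replacement packet
 * image (starting at the IP header), which ipq_mangle_ipv4() above
 * copies over the queued skb.  From userspace this is done by passing
 * payload bytes with the verdict; with libipq, roughly (new_len and
 * new_payload being hypothetical names for the modified packet):
 *
 *	ipq_set_verdict(h, m->packet_id, NF_ACCEPT,
 *			new_len, new_payload);
 */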

static inline int
id_cmp(struct ipq_queue_entry *e, unsigned long id)
{
	return (id == (unsigned long)e);
}

static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
	struct ipq_queue_entry *entry;

	if (vmsg->value > NF_MAX_VERDICT)
		return -EINVAL;

	entry = ipq_find_dequeue_entry(id_cmp, vmsg->id);
	if (entry == NULL)
		return -ENOENT;
	else {
		int verdict = vmsg->value;

		if (vmsg->data_len && vmsg->data_len == len)
			if (ipq_mangle_ipv4(vmsg, entry) < 0)
				verdict = NF_DROP;

		ipq_issue_verdict(entry, verdict);
		return 0;
	}
}

static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status;

	write_lock_bh(&queue_lock);
	status = __ipq_set_mode(mode, range);
	write_unlock_bh(&queue_lock);
	return status;
}

static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
		 unsigned char type, unsigned int len)
{
	int status = 0;

	if (len < sizeof(*pmsg))
		return -EINVAL;

	switch (type) {
	case IPQM_MODE:
		status = ipq_set_mode(pmsg->msg.mode.value,
				      pmsg->msg.mode.range);
		break;

	case IPQM_VERDICT:
		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
			status = -EINVAL;
		else
			status = ipq_set_verdict(&pmsg->msg.verdict,
						 len - sizeof(*pmsg));
		break;
	default:
		status = -EINVAL;
	}
	return status;
}

static int
dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
{
	if (entry->info->indev)
		if (entry->info->indev->ifindex == ifindex)
			return 1;
	if (entry->info->outdev)
		if (entry->info->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

static void
ipq_dev_drop(int ifindex)
{
	struct ipq_queue_entry *entry;

	while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL)
		ipq_issue_verdict(entry, NF_DROP);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
	int status, type, pid, flags, nlmsglen, skblen;
	struct nlmsghdr *nlh;

	skblen = skb->len;
	if (skblen < sizeof(*nlh))
		return;

	nlh = nlmsg_hdr(skb);
	nlmsglen = nlh->nlmsg_len;
	if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
		return;

	pid = nlh->nlmsg_pid;
	flags = nlh->nlmsg_flags;

	if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
		RCV_SKB_FAIL(-EINVAL);

	if (flags & MSG_TRUNC)
		RCV_SKB_FAIL(-ECOMM);

	type = nlh->nlmsg_type;
	if (type < NLMSG_NOOP || type >= IPQM_MAX)
		RCV_SKB_FAIL(-EINVAL);

	if (type <= IPQM_BASE)
		return;

	if (security_netlink_recv(skb, CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);

	write_lock_bh(&queue_lock);

	if (peer_pid) {
		if (peer_pid != pid) {
			write_unlock_bh(&queue_lock);
			RCV_SKB_FAIL(-EBUSY);
		}
	} else {
		net_enable_timestamp();
		peer_pid = pid;
	}

	write_unlock_bh(&queue_lock);

	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
				  nlmsglen - NLMSG_LENGTH(0));
	if (status < 0)
		RCV_SKB_FAIL(status);

	if (flags & NLM_F_ACK)
		netlink_ack(skb, nlh, 0);
	return;
}

static void
ipq_rcv_skb(struct sk_buff *skb)
{
	mutex_lock(&ipqnl_mutex);
	__ipq_rcv_skb(skb);
	mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
		  unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		ipq_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block ipq_dev_notifier = {
	.notifier_call	= ipq_rcv_dev_event,
};

static int
ipq_rcv_nl_event(struct notifier_block *this,
		 unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_FIREWALL && n->pid) {
		write_lock_bh(&queue_lock);
		if ((n->net == &init_net) && (n->pid == peer_pid))
			__ipq_reset();
		write_unlock_bh(&queue_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ipq_nl_notifier = {
	.notifier_call	= ipq_rcv_nl_event,
};

static struct ctl_table_header *ipq_sysctl_header;

static ctl_table ipq_table[] = {
	{
		.ctl_name	= NET_IPQ_QMAX,
		.procname	= NET_IPQ_QMAX_NAME,
		.data		= &queue_maxlen,
		.maxlen		= sizeof(queue_maxlen),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ .ctl_name = 0 }
};

static ctl_table ipq_dir_table[] = {
	{
		.ctl_name	= NET_IPV4,
		.procname	= "ipv4",
		.mode		= 0555,
		.child		= ipq_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ipq_root_table[] = {
	{
		.ctl_name	= CTL_NET,
		.procname	= "net",
		.mode		= 0555,
		.child		= ipq_dir_table
	},
	{ .ctl_name = 0 }
};
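
/*
 * The tables above expose the queue limit as
 * /proc/sys/net/ipv4/ip_queue_maxlen, so it can be tuned at runtime,
 * e.g.:
 *
 *	sysctl -w net.ipv4.ip_queue_maxlen=2048
 */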

static int ip_queue_show(struct seq_file *m, void *v)
{
	read_lock_bh(&queue_lock);

	seq_printf(m,
		   "Peer PID          : %d\n"
		   "Copy mode         : %hu\n"
		   "Copy range        : %u\n"
		   "Queue length      : %u\n"
		   "Queue max. length : %u\n"
		   "Queue dropped     : %u\n"
		   "Netlink dropped   : %u\n",
		   peer_pid,
		   copy_mode,
		   copy_range,
		   queue_total,
		   queue_maxlen,
		   queue_dropped,
		   queue_user_dropped);

	read_unlock_bh(&queue_lock);
	return 0;
}

static int ip_queue_open(struct inode *inode, struct file *file)
{
	return single_open(file, ip_queue_show, NULL);
}

static const struct file_operations ip_queue_proc_fops = {
	.open		= ip_queue_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
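
/*
 * The counters shown by ip_queue_show() above can be inspected with a
 * plain read of the proc file, e.g.:
 *
 *	cat /proc/net/ip_queue
 */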

static const struct nf_queue_handler nfqh = {
	.name	= "ip_queue",
	.outfn	= &ipq_enqueue_packet,
};
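
/*
 * Packets only reach ipq_enqueue_packet() once a ruleset sends them to
 * the QUEUE target for PF_INET, e.g. (illustrative):
 *
 *	iptables -A INPUT -p icmp -j QUEUE
 */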

static int __init ip_queue_init(void)
{
	int status = -ENOMEM;
	struct proc_dir_entry *proc;

	netlink_register_notifier(&ipq_nl_notifier);
	ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
				      ipq_rcv_skb, NULL, THIS_MODULE);
	if (ipqnl == NULL) {
		printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
	if (proc) {
		proc->owner = THIS_MODULE;
		proc->proc_fops = &ip_queue_proc_fops;
	} else {
		printk(KERN_ERR "ip_queue: failed to create proc entry\n");
		goto cleanup_ipqnl;
	}

	register_netdevice_notifier(&ipq_dev_notifier);
	ipq_sysctl_header = register_sysctl_table(ipq_root_table);

	status = nf_register_queue_handler(PF_INET, &nfqh);
	if (status < 0) {
		printk(KERN_ERR "ip_queue: failed to register queue handler\n");
		goto cleanup_sysctl;
	}
	return status;

cleanup_sysctl:
	unregister_sysctl_table(ipq_sysctl_header);
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
cleanup_ipqnl:
	sock_release(ipqnl->sk_socket);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
	netlink_unregister_notifier(&ipq_nl_notifier);
	return status;
}

static void __exit ip_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	synchronize_net();
	ipq_flush(NF_DROP);

	unregister_sysctl_table(ipq_sysctl_header);
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

	sock_release(ipqnl->sk_socket);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

	netlink_unregister_notifier(&ipq_nl_notifier);
}

MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");

module_init(ip_queue_init);
module_exit(ip_queue_fini);