/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
10 #include <linux/kernel.h>
11 #include <linux/netfilter.h>
12 #include <net/protocol.h>
13 #include <linux/init.h>
14 #include <linux/skbuff.h>
15 #include <linux/wait.h>
16 #include <linux/module.h>
17 #include <linux/interrupt.h>
19 #include <linux/netdevice.h>
20 #include <linux/netfilter_ipv6.h>
21 #include <linux/inetdevice.h>
22 #include <linux/proc_fs.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <net/net_namespace.h>
28 #include "nf_internals.h"
30 static DEFINE_MUTEX(afinfo_mutex
);
32 const struct nf_afinfo __rcu
*nf_afinfo
[NFPROTO_NUMPROTO
] __read_mostly
;
33 EXPORT_SYMBOL(nf_afinfo
);
34 const struct nf_ipv6_ops __rcu
*nf_ipv6_ops __read_mostly
;
35 EXPORT_SYMBOL_GPL(nf_ipv6_ops
);
37 DEFINE_PER_CPU(bool, nf_skb_duplicated
);
38 EXPORT_SYMBOL_GPL(nf_skb_duplicated
);
40 int nf_register_afinfo(const struct nf_afinfo
*afinfo
)
42 mutex_lock(&afinfo_mutex
);
43 RCU_INIT_POINTER(nf_afinfo
[afinfo
->family
], afinfo
);
44 mutex_unlock(&afinfo_mutex
);
47 EXPORT_SYMBOL_GPL(nf_register_afinfo
);
49 void nf_unregister_afinfo(const struct nf_afinfo
*afinfo
)
51 mutex_lock(&afinfo_mutex
);
52 RCU_INIT_POINTER(nf_afinfo
[afinfo
->family
], NULL
);
53 mutex_unlock(&afinfo_mutex
);
56 EXPORT_SYMBOL_GPL(nf_unregister_afinfo
);
#ifdef HAVE_JUMP_LABEL
/* Static keys let the hot path skip nf_hook_slow() entirely when no
 * hook is registered for a given (pf, hooknum) pair.
 */
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

/* Protects modifications of the per-(pf, hooknum) hook lists. */
static DEFINE_MUTEX(nf_hook_mutex);
65 static struct list_head
*nf_find_hook_list(struct net
*net
,
66 const struct nf_hook_ops
*reg
)
68 struct list_head
*hook_list
= NULL
;
70 if (reg
->pf
!= NFPROTO_NETDEV
)
71 hook_list
= &net
->nf
.hooks
[reg
->pf
][reg
->hooknum
];
72 else if (reg
->hooknum
== NF_NETDEV_INGRESS
) {
73 #ifdef CONFIG_NETFILTER_INGRESS
74 if (reg
->dev
&& dev_net(reg
->dev
) == net
)
75 hook_list
= ®
->dev
->nf_hooks_ingress
;
81 struct nf_hook_entry
{
82 const struct nf_hook_ops
*orig_ops
;
83 struct nf_hook_ops ops
;
86 int nf_register_net_hook(struct net
*net
, const struct nf_hook_ops
*reg
)
88 struct list_head
*hook_list
;
89 struct nf_hook_entry
*entry
;
90 struct nf_hook_ops
*elem
;
92 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
96 entry
->orig_ops
= reg
;
99 hook_list
= nf_find_hook_list(net
, reg
);
105 mutex_lock(&nf_hook_mutex
);
106 list_for_each_entry(elem
, hook_list
, list
) {
107 if (reg
->priority
< elem
->priority
)
110 list_add_rcu(&entry
->ops
.list
, elem
->list
.prev
);
111 mutex_unlock(&nf_hook_mutex
);
112 #ifdef CONFIG_NETFILTER_INGRESS
113 if (reg
->pf
== NFPROTO_NETDEV
&& reg
->hooknum
== NF_NETDEV_INGRESS
)
114 net_inc_ingress_queue();
116 #ifdef HAVE_JUMP_LABEL
117 static_key_slow_inc(&nf_hooks_needed
[reg
->pf
][reg
->hooknum
]);
121 EXPORT_SYMBOL(nf_register_net_hook
);
123 void nf_unregister_net_hook(struct net
*net
, const struct nf_hook_ops
*reg
)
125 struct list_head
*hook_list
;
126 struct nf_hook_entry
*entry
;
127 struct nf_hook_ops
*elem
;
129 hook_list
= nf_find_hook_list(net
, reg
);
133 mutex_lock(&nf_hook_mutex
);
134 list_for_each_entry(elem
, hook_list
, list
) {
135 entry
= container_of(elem
, struct nf_hook_entry
, ops
);
136 if (entry
->orig_ops
== reg
) {
137 list_del_rcu(&entry
->ops
.list
);
141 mutex_unlock(&nf_hook_mutex
);
142 if (&elem
->list
== hook_list
) {
143 WARN(1, "nf_unregister_net_hook: hook not found!\n");
146 #ifdef CONFIG_NETFILTER_INGRESS
147 if (reg
->pf
== NFPROTO_NETDEV
&& reg
->hooknum
== NF_NETDEV_INGRESS
)
148 net_dec_ingress_queue();
150 #ifdef HAVE_JUMP_LABEL
151 static_key_slow_dec(&nf_hooks_needed
[reg
->pf
][reg
->hooknum
]);
154 nf_queue_nf_hook_drop(net
, &entry
->ops
);
157 EXPORT_SYMBOL(nf_unregister_net_hook
);
159 int nf_register_net_hooks(struct net
*net
, const struct nf_hook_ops
*reg
,
165 for (i
= 0; i
< n
; i
++) {
166 err
= nf_register_net_hook(net
, ®
[i
]);
174 nf_unregister_net_hooks(net
, reg
, i
);
177 EXPORT_SYMBOL(nf_register_net_hooks
);
179 void nf_unregister_net_hooks(struct net
*net
, const struct nf_hook_ops
*reg
,
183 nf_unregister_net_hook(net
, ®
[n
]);
185 EXPORT_SYMBOL(nf_unregister_net_hooks
);
/* Hooks registered for all network namespaces (legacy API); new
 * namespaces replay this list in nf_register_hook_list().
 */
static LIST_HEAD(nf_hook_list);
189 int nf_register_hook(struct nf_hook_ops
*reg
)
191 struct net
*net
, *last
;
196 ret
= nf_register_net_hook(net
, reg
);
197 if (ret
&& ret
!= -ENOENT
)
200 list_add_tail(®
->list
, &nf_hook_list
);
209 nf_unregister_net_hook(net
, reg
);
214 EXPORT_SYMBOL(nf_register_hook
);
216 void nf_unregister_hook(struct nf_hook_ops
*reg
)
221 list_del(®
->list
);
223 nf_unregister_net_hook(net
, reg
);
226 EXPORT_SYMBOL(nf_unregister_hook
);
228 int nf_register_hooks(struct nf_hook_ops
*reg
, unsigned int n
)
233 for (i
= 0; i
< n
; i
++) {
234 err
= nf_register_hook(®
[i
]);
242 nf_unregister_hooks(reg
, i
);
245 EXPORT_SYMBOL(nf_register_hooks
);
247 void nf_unregister_hooks(struct nf_hook_ops
*reg
, unsigned int n
)
250 nf_unregister_hook(®
[n
]);
252 EXPORT_SYMBOL(nf_unregister_hooks
);
254 unsigned int nf_iterate(struct list_head
*head
,
256 struct nf_hook_state
*state
,
257 struct nf_hook_ops
**elemp
)
259 unsigned int verdict
;
262 * The caller must not block between calls to this
263 * function because of risk of continuing from deleted element.
265 list_for_each_entry_continue_rcu((*elemp
), head
, list
) {
266 if (state
->thresh
> (*elemp
)->priority
)
269 /* Optimization: we don't need to hold module
270 reference here, since function can't sleep. --RR */
272 verdict
= (*elemp
)->hook(*elemp
, skb
, state
);
273 if (verdict
!= NF_ACCEPT
) {
274 #ifdef CONFIG_NETFILTER_DEBUG
275 if (unlikely((verdict
& NF_VERDICT_MASK
)
277 NFDEBUG("Evil return from %p(%u).\n",
278 (*elemp
)->hook
, state
->hook
);
282 if (verdict
!= NF_REPEAT
)
291 /* Returns 1 if okfn() needs to be executed by the caller,
292 * -EPERM for NF_DROP, 0 otherwise. */
293 int nf_hook_slow(struct sk_buff
*skb
, struct nf_hook_state
*state
)
295 struct nf_hook_ops
*elem
;
296 unsigned int verdict
;
299 /* We may already have this, but read-locks nest anyway */
302 elem
= list_entry_rcu(state
->hook_list
, struct nf_hook_ops
, list
);
304 verdict
= nf_iterate(state
->hook_list
, skb
, state
, &elem
);
305 if (verdict
== NF_ACCEPT
|| verdict
== NF_STOP
) {
307 } else if ((verdict
& NF_VERDICT_MASK
) == NF_DROP
) {
309 ret
= NF_DROP_GETERR(verdict
);
312 } else if ((verdict
& NF_VERDICT_MASK
) == NF_QUEUE
) {
313 int err
= nf_queue(skb
, elem
, state
,
314 verdict
>> NF_VERDICT_QBITS
);
316 if (err
== -ECANCELED
)
319 (verdict
& NF_VERDICT_FLAG_QUEUE_BYPASS
))
327 EXPORT_SYMBOL(nf_hook_slow
);
330 int skb_make_writable(struct sk_buff
*skb
, unsigned int writable_len
)
332 if (writable_len
> skb
->len
)
335 /* Not exclusive use of packet? Must copy. */
336 if (!skb_cloned(skb
)) {
337 if (writable_len
<= skb_headlen(skb
))
339 } else if (skb_clone_writable(skb
, writable_len
))
342 if (writable_len
<= skb_headlen(skb
))
345 writable_len
-= skb_headlen(skb
);
347 return !!__pskb_pull_tail(skb
, writable_len
);
349 EXPORT_SYMBOL(skb_make_writable
);
351 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
352 /* This does not belong here, but locally generated errors need it if connection
353 tracking in use: without this, connection may not be in hash table, and hence
354 manufactured ICMP or RST packets will not be associated with it. */
355 void (*ip_ct_attach
)(struct sk_buff
*, const struct sk_buff
*)
357 EXPORT_SYMBOL(ip_ct_attach
);
359 void nf_ct_attach(struct sk_buff
*new, const struct sk_buff
*skb
)
361 void (*attach
)(struct sk_buff
*, const struct sk_buff
*);
365 attach
= rcu_dereference(ip_ct_attach
);
371 EXPORT_SYMBOL(nf_ct_attach
);
373 void (*nf_ct_destroy
)(struct nf_conntrack
*) __rcu __read_mostly
;
374 EXPORT_SYMBOL(nf_ct_destroy
);
376 void nf_conntrack_destroy(struct nf_conntrack
*nfct
)
378 void (*destroy
)(struct nf_conntrack
*);
381 destroy
= rcu_dereference(nf_ct_destroy
);
382 BUG_ON(destroy
== NULL
);
386 EXPORT_SYMBOL(nf_conntrack_destroy
);
388 struct nfq_ct_hook __rcu
*nfq_ct_hook __read_mostly
;
389 EXPORT_SYMBOL_GPL(nfq_ct_hook
);
391 /* Built-in default zone used e.g. by modules. */
392 const struct nf_conntrack_zone nf_ct_zone_dflt
= {
393 .id
= NF_CT_DEFAULT_ZONE_ID
,
394 .dir
= NF_CT_DEFAULT_ZONE_DIR
,
396 EXPORT_SYMBOL_GPL(nf_ct_zone_dflt
);
397 #endif /* CONFIG_NF_CONNTRACK */
399 #ifdef CONFIG_NF_NAT_NEEDED
400 void (*nf_nat_decode_session_hook
)(struct sk_buff
*, struct flowi
*);
401 EXPORT_SYMBOL(nf_nat_decode_session_hook
);
404 static int nf_register_hook_list(struct net
*net
)
406 struct nf_hook_ops
*elem
;
410 list_for_each_entry(elem
, &nf_hook_list
, list
) {
411 ret
= nf_register_net_hook(net
, elem
);
412 if (ret
&& ret
!= -ENOENT
)
419 list_for_each_entry_continue_reverse(elem
, &nf_hook_list
, list
)
420 nf_unregister_net_hook(net
, elem
);
425 static void nf_unregister_hook_list(struct net
*net
)
427 struct nf_hook_ops
*elem
;
430 list_for_each_entry(elem
, &nf_hook_list
, list
)
431 nf_unregister_net_hook(net
, elem
);
435 static int __net_init
netfilter_net_init(struct net
*net
)
439 for (i
= 0; i
< ARRAY_SIZE(net
->nf
.hooks
); i
++) {
440 for (h
= 0; h
< NF_MAX_HOOKS
; h
++)
441 INIT_LIST_HEAD(&net
->nf
.hooks
[i
][h
]);
444 #ifdef CONFIG_PROC_FS
445 net
->nf
.proc_netfilter
= proc_net_mkdir(net
, "netfilter",
447 if (!net
->nf
.proc_netfilter
) {
448 if (!net_eq(net
, &init_net
))
449 pr_err("cannot create netfilter proc entry");
454 ret
= nf_register_hook_list(net
);
456 remove_proc_entry("netfilter", net
->proc_net
);
461 static void __net_exit
netfilter_net_exit(struct net
*net
)
463 nf_unregister_hook_list(net
);
464 remove_proc_entry("netfilter", net
->proc_net
);
467 static struct pernet_operations netfilter_net_ops
= {
468 .init
= netfilter_net_init
,
469 .exit
= netfilter_net_exit
,
472 int __init
netfilter_init(void)
476 ret
= register_pernet_subsys(&netfilter_net_ops
);
480 ret
= netfilter_log_init();
486 unregister_pernet_subsys(&netfilter_net_ops
);