net/ipv4/inet_fragment.c
/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS	128
#define INETFRAGS_EVICT_MAX	512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL	(5 * HZ)

/* Given the OR of the IPFRAG_ECN_* values of all fragments, apply the
 * RFC 3168 5.3 requirements.
 * Value: 0xff if the frame should be dropped.
 *        0 or INET_ECN_CE value, to be ORed into the final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
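
/*
 * Illustrative sketch, not part of the original file: a reassembler
 * ORs the IPFRAG_ECN_* bit of every received fragment into a per-queue
 * field (called qp->ecn here, an assumed name) and consults the table
 * once, when the datagram is rebuilt:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *
 *	if (unlikely(ecn == 0xff))
 *		goto drop;		// invalid combination, drop frame
 *	iph->tos |= ecn;		// 0 or INET_ECN_CE
 */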

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
        return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
        return time_after(jiffies,
               f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

static void inet_frag_secret_rebuild(struct inet_frags *f)
{
        int i;

        write_seqlock_bh(&f->rnd_seqlock);

        if (!inet_frag_may_rebuild(f))
                goto out;

        get_random_bytes(&f->rnd, sizeof(u32));

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;

                hb = &f->hash[i];
                spin_lock(&hb->chain_lock);

                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
                        unsigned int hval = inet_frag_hashfn(f, q);

                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;

                                hlist_del(&q->list);

                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];

                                /* This is the only place where we take
                                 * another chain_lock while already holding
                                 * one.  Since rebuilds never run concurrently,
                                 * we cannot deadlock on the hb_dest lock below:
                                 * if it is already locked, it will be released
                                 * soon, because its holder cannot be waiting
                                 * for the hb lock that we took above.
                                 */
                                spin_lock_nested(&hb_dest->chain_lock,
                                                 SINGLE_DEPTH_NESTING);
                                hlist_add_head(&q->list, &hb_dest->chain);
                                spin_unlock(&hb_dest->chain_lock);
                        }
                }
                spin_unlock(&hb->chain_lock);
        }

        f->rebuild = false;
        f->last_rebuild_jiffies = jiffies;
out:
        write_sequnlock_bh(&f->rnd_seqlock);
}

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
        return q->net->low_thresh == 0 ||
               frag_mem_limit(q->net) >= q->net->low_thresh;
}

static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
        struct inet_frag_queue *fq;
        struct hlist_node *n;
        unsigned int evicted = 0;
        HLIST_HEAD(expired);

        spin_lock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
                if (!inet_fragq_should_evict(fq))
                        continue;

                if (!del_timer(&fq->timer))
                        continue;

                hlist_add_head(&fq->list_evictor, &expired);
                ++evicted;
        }

        spin_unlock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
                f->frag_expire((unsigned long) fq);

        return evicted;
}

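/* Evict from up to INETFRAGS_EVICT_BUCKETS hash buckets per run,
 * resuming at f->next_bucket where the previous run stopped, and bail
 * out early once more than INETFRAGS_EVICT_MAX queues have been
 * evicted, so that a single work item cannot monopolize the CPU.
 */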
static void inet_frag_worker(struct work_struct *work)
{
        unsigned int budget = INETFRAGS_EVICT_BUCKETS;
        unsigned int i, evicted = 0;
        struct inet_frags *f;

        f = container_of(work, struct inet_frags, frags_work);

        BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

        local_bh_disable();

        for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
                evicted += inet_evict_bucket(f, &f->hash[i]);
                i = (i + 1) & (INETFRAGS_HASHSZ - 1);
                if (evicted > INETFRAGS_EVICT_MAX)
                        break;
        }

        f->next_bucket = i;

        local_bh_enable();

        if (f->rebuild && inet_frag_may_rebuild(f))
                inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
        if (unlikely(!work_pending(&f->frags_work)))
                schedule_work(&f->frags_work);
}

int inet_frags_init(struct inet_frags *f)
{
        int i;

        INIT_WORK(&f->frags_work, inet_frag_worker);

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];

                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }

        seqlock_init(&f->rnd_seqlock);
        f->last_rebuild_jiffies = 0;
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(inet_frags_init);
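
/*
 * Illustrative sketch, not part of the original file: a protocol
 * typically fills in the callbacks this file invokes (hashfn, match,
 * constructor, destructor, skb_free, frag_expire) and then calls
 * inet_frags_init().  All my_* names below are assumed stand-ins:
 *
 *	static struct inet_frags my_frags;
 *
 *	static int __init my_reasm_init(void)
 *	{
 *		my_frags.hashfn		  = my_hashfn;
 *		my_frags.constructor	  = my_queue_ctor;
 *		my_frags.destructor	  = my_queue_dtor;
 *		my_frags.skb_free	  = NULL;
 *		my_frags.qsize		  = sizeof(struct my_frag_queue);
 *		my_frags.match		  = my_match;
 *		my_frags.frag_expire	  = my_frag_expire;
 *		my_frags.frags_cache_name = "my_frag_queues";
 *		return inet_frags_init(&my_frags);
 *	}
 */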

void inet_frags_fini(struct inet_frags *f)
{
        cancel_work_sync(&f->frags_work);
        kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
        unsigned int seq;
        int i;

        nf->low_thresh = 0;

evict_again:
        local_bh_disable();
        seq = read_seqbegin(&f->rnd_seqlock);

        for (i = 0; i < INETFRAGS_HASHSZ; i++)
                inet_evict_bucket(f, &f->hash[i]);

        local_bh_enable();
        cond_resched();

        if (read_seqretry(&f->rnd_seqlock, seq) ||
            percpu_counter_sum(&nf->mem))
                goto evict_again;

        percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

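/* Find the bucket for @fq and lock its chain.  If the hash secret was
 * rebuilt while we were waiting for the lock (detected via rnd_seqlock),
 * the queue may have been rehashed to another bucket, so retry.
 */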
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
        struct inet_frag_bucket *hb;
        unsigned int seq, hash;

restart:
        seq = read_seqbegin(&f->rnd_seqlock);

        hash = inet_frag_hashfn(f, fq);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        if (read_seqretry(&f->rnd_seqlock, seq)) {
                spin_unlock(&hb->chain_lock);
                goto restart;
        }

        return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
        struct inet_frag_bucket *hb;

        hb = get_frag_bucket_locked(fq, f);
        hlist_del(&fq->list);
        fq->flags |= INET_FRAG_COMPLETE;
        spin_unlock(&hb->chain_lock);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
        }
}
EXPORT_SYMBOL(inet_frag_kill);
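
/*
 * Illustrative note, not part of the original file: a queue holds one
 * refcount for the hash chain, one for a pending timer, and one per
 * active user.  inet_frag_kill() releases the timer and chain
 * references; the final inet_frag_put() then frees the queue via
 * inet_frag_destroy().  An expire handler (run from the timer, which
 * owns the timer reference at that point) therefore looks roughly like
 * this, with my_frags an assumed name:
 *
 *	static void my_frag_expire(unsigned long data)
 *	{
 *		struct inet_frag_queue *fq = (struct inet_frag_queue *)data;
 *
 *		spin_lock(&fq->lock);
 *		if (!(fq->flags & INET_FRAG_COMPLETE))
 *			inet_frag_kill(fq, &my_frags);
 *		spin_unlock(&fq->lock);
 *
 *		inet_frag_put(fq, &my_frags);	// drop the timer's reference
 *	}
 */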

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
                                  struct sk_buff *skb)
{
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fp = q->fragments;
        nf = q->net;
        while (fp) {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                frag_kfree_skb(nf, f, fp);
                fp = xp;
        }
        sum = sum_truesize + f->qsize;

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);

        sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
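
/* Memory accounting note: the sub_frag_mem_limit() call above undoes
 * both the f->qsize charge made in inet_frag_alloc() and the per-skb
 * truesize charges the protocol reassembler is expected to have added
 * as fragments were queued (outside this file).
 */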

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
                                                struct inet_frag_queue *qp_in,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
        struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
        /* On SMP there is a race: such an entry could have been created
         * on another CPU before we acquired the hash bucket lock, so we
         * have to recheck the hash table.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
                        qp_in->flags |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
        }
#endif
        qp = qp_in;
        if (!mod_timer(&qp->timer, jiffies + nf->timeout))
                atomic_inc(&qp->refcnt);

        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);

        spin_unlock(&hb->chain_lock);

        return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        if (frag_mem_limit(nf) > nf->high_thresh) {
                inet_frag_schedule_worker(f);
                return NULL;
        }

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;

        q->net = nf;
        f->constructor(q, arg);
        add_frag_mem_limit(nf, f->qsize);

        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_queue *q;

        q = inet_frag_alloc(nf, f, arg);
        if (!q)
                return NULL;

        return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                                       struct inet_frags *f, void *key,
                                       unsigned int hash)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;

        if (frag_mem_limit(nf) > nf->low_thresh)
                inet_frag_schedule_worker(f);

        hash &= (INETFRAGS_HASHSZ - 1);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_for_each_entry(q, &hb->chain, list) {
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);

        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);

        if (inet_frag_may_rebuild(f)) {
                if (!f->rebuild)
                        f->rebuild = true;
                inet_frag_schedule_worker(f);
        }

        return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix)
{
        static const char msg[] = "inet_frag_find: Fragment hash bucket"
                " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
                ". Dropping fragment.\n";

        if (PTR_ERR(q) == -ENOBUFS)
                net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
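
/*
 * Illustrative sketch, not part of the original file: a typical lookup
 * path hashes its protocol-specific key, calls inet_frag_find(), and
 * handles the ERR_PTR(-ENOBUFS) returned when a chain exceeded
 * INETFRAGS_MAXDEPTH.  The my_* and net->my_frags names are assumed:
 *
 *	struct inet_frag_queue *q;
 *	unsigned int hash = my_hash_key(&key);
 *
 *	q = inet_frag_find(&net->my_frags, &my_frags, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *
 *	// ... use the queue, then drop the reference taken by the find:
 *	inet_frag_put(q, &my_frags);
 */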