/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Version:	$Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE: the logic of IP defragmentation now parallels the corresponding
 * IPv6 code. If you change something here, _PLEASE_ update
 * ipv6/reassembly.c as well. Or notify me, at least. --ANK
 */

/* Fragment cache limits. We will commit 256K at one time. Should we
 * cross that limit, we will prune down to 192K. This should cope with
 * even the most extreme cases without allowing an attacker to
 * measurably harm machine performance.
 */
int sysctl_ipfrag_high_thresh = 256*1024;
int sysctl_ipfrag_low_thresh = 192*1024;

/* Important NOTE! The fragment queue must be destroyed before the MSL
 * expires. RFC 791 is wrong in proposing to prolong the timer by the TTL
 * of each arriving fragment.
 */
int sysctl_ipfrag_time = IP_FRAG_TIME;

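/* Per-fragment reassembly state is kept in the skb control block
 * (skb->cb): FRAG_CB() below overlays it to record where this
 * fragment's payload sits within the original datagram.
 */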
struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct ipq	*next;		/* linked list pointers		*/
	struct list_head lru_list;	/* lru list member		*/
	u32		user;
	u32		saddr;
	u32		daddr;
	u16		id;
	u8		protocol;
	u8		last_in;
#define COMPLETE	4
#define FIRST_IN	2
#define LAST_IN		1

	struct sk_buff	*fragments;	/* linked list of received fragments */
	int		len;		/* total length of original datagram */
	int		meat;
	spinlock_t	lock;
	atomic_t	refcnt;
	struct timer_list timer;	/* when will this queue expire?	*/
	struct ipq	**pprev;
	int		iif;
	struct timeval	stamp;
};

/* Hash table. */

#define IPQ_HASHSZ	64

/* Per-bucket lock is easy to add now. */
static struct ipq *ipq_hash[IPQ_HASHSZ];
static DEFINE_RWLOCK(ipfrag_lock);
static u32 ipfrag_hash_rnd;
static LIST_HEAD(ipq_lru_list);
int ip_frag_nqueues = 0;

static __inline__ void __ipq_unlink(struct ipq *qp)
{
	if (qp->next)
		qp->next->pprev = qp->pprev;
	*qp->pprev = qp->next;
	list_del(&qp->lru_list);
	ip_frag_nqueues--;
}

static __inline__ void ipq_unlink(struct ipq *ipq)
{
	write_lock(&ipfrag_lock);
	__ipq_unlink(ipq);
	write_unlock(&ipfrag_lock);
}

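/* Buckets are keyed on (id, protocol, saddr, daddr) folded together with
 * a random secret; IPQ_HASHSZ is a power of two, so the jhash result can
 * simply be masked down to a bucket index.
 */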
static unsigned int ipqhashfn(u16 id, u32 saddr, u32 daddr, u8 prot)
{
	return jhash_3words((u32)id << 16 | prot, saddr, daddr,
			    ipfrag_hash_rnd) & (IPQ_HASHSZ - 1);
}

static struct timer_list ipfrag_secret_timer;
int sysctl_ipfrag_secret_interval = 10 * 60 * HZ;

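/* Periodically pick a new hash secret and rehash every queue into its
 * new bucket, so that an attacker cannot predict bucket collisions and
 * pile all fragments onto a single chain.
 */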
static void ipfrag_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;
	int i;

	write_lock(&ipfrag_lock);
	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
	for (i = 0; i < IPQ_HASHSZ; i++) {
		struct ipq *q;

		q = ipq_hash[i];
		while (q) {
			struct ipq *next = q->next;
			unsigned int hval = ipqhashfn(q->id, q->saddr,
						      q->daddr, q->protocol);

			if (hval != i) {
				/* Unlink. */
				if (q->next)
					q->next->pprev = q->pprev;
				*q->pprev = q->next;

				/* Relink to new hash chain. */
				if ((q->next = ipq_hash[hval]) != NULL)
					q->next->pprev = &q->next;
				ipq_hash[hval] = q;
				q->pprev = &ipq_hash[hval];
			}

			q = next;
		}
	}
	write_unlock(&ipfrag_lock);

	mod_timer(&ipfrag_secret_timer, now + sysctl_ipfrag_secret_interval);
}

atomic_t ip_frag_mem = ATOMIC_INIT(0);	/* Memory used for fragments */

/* Memory Tracking Functions. */
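/* ip_frag_mem is charged with the truesize of every queued fragment plus
 * sizeof(struct ipq) per queue. ip_defrag() checks it against the high
 * threshold, and the evictor prunes it back down to the low one. The
 * optional 'work' argument lets the evictor debit its byte budget as
 * memory is freed.
 */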
static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip_frag_mem);
	kfree_skb(skb);
}

static __inline__ void frag_free_queue(struct ipq *qp, int *work)
{
	if (work)
		*work -= sizeof(struct ipq);
	atomic_sub(sizeof(struct ipq), &ip_frag_mem);
	kfree(qp);
}

static __inline__ struct ipq *frag_alloc_queue(void)
{
	struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);

	if (!qp)
		return NULL;
	atomic_add(sizeof(struct ipq), &ip_frag_mem);
	return qp;
}


/* Destruction primitives. */

/* Complete destruction of ipq. */
static void ip_frag_destroy(struct ipq *qp, int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(qp->last_in & COMPLETE);
	BUG_TRAP(del_timer(&qp->timer) == 0);

	/* Release all fragment data. */
	fp = qp->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(fp, work);
		fp = xp;
	}

	/* Finally, release the queue descriptor itself. */
	frag_free_queue(qp, work);
}

static __inline__ void ipq_put(struct ipq *ipq, int *work)
{
	if (atomic_dec_and_test(&ipq->refcnt))
		ip_frag_destroy(ipq, work);
}

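/* Reference counting: a live queue holds one reference for its hash
 * table/LRU membership and one for its pending timer; every lookup takes
 * its own. ipq_kill() below drops the timer and table references, and
 * the queue is finally destroyed when the last ipq_put() fires.
 */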
/* Kill ipq entry. It is not destroyed immediately, because the caller
 * (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	if (del_timer(&ipq->timer))
		atomic_dec(&ipq->refcnt);

	if (!(ipq->last_in & COMPLETE)) {
		ipq_unlink(ipq);
		atomic_dec(&ipq->refcnt);
		ipq->last_in |= COMPLETE;
	}
}

/* Memory limiting on fragments. The evictor trashes the oldest fragment
 * queues until we are back under the low threshold.
 */
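/* The eviction budget ('work') is the number of bytes we are above the
 * low watermark; frag_kfree_skb() and frag_free_queue() debit it as
 * memory is actually released.
 */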
static void ip_evictor(void)
{
	struct ipq *qp;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip_frag_mem) - sysctl_ipfrag_low_thresh;
	if (work <= 0)
		return;

	while (work > 0) {
		read_lock(&ipfrag_lock);
		if (list_empty(&ipq_lru_list)) {
			read_unlock(&ipfrag_lock);
			return;
		}
		tmp = ipq_lru_list.next;
		qp = list_entry(tmp, struct ipq, lru_list);
		atomic_inc(&qp->refcnt);
		read_unlock(&ipfrag_lock);

		spin_lock(&qp->lock);
		if (!(qp->last_in & COMPLETE))
			ipq_kill(qp);
		spin_unlock(&qp->lock);

		ipq_put(qp, &work);
		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp = (struct ipq *) arg;

	spin_lock(&qp->lock);

	if (qp->last_in & COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);

	if ((qp->last_in & FIRST_IN) && qp->fragments != NULL) {
		struct sk_buff *head = qp->fragments;
		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if ((head->dev = dev_get_by_index(qp->iif)) != NULL) {
			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
			dev_put(head->dev);
		}
	}
out:
	spin_unlock(&qp->lock);
	ipq_put(qp, NULL);
}

/* Creation primitives. */

static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
{
	struct ipq *qp;

	write_lock(&ipfrag_lock);
#ifdef CONFIG_SMP
	/* On SMP we must recheck the hash table: the same entry may have
	 * been created by another CPU while we traded the read lock for
	 * the write lock.
	 */
	for (qp = ipq_hash[hash]; qp; qp = qp->next) {
		if (qp->id == qp_in->id &&
		    qp->saddr == qp_in->saddr &&
		    qp->daddr == qp_in->daddr &&
		    qp->protocol == qp_in->protocol &&
		    qp->user == qp_in->user) {
			atomic_inc(&qp->refcnt);
			write_unlock(&ipfrag_lock);
			qp_in->last_in |= COMPLETE;
			ipq_put(qp_in, NULL);
			return qp;
		}
	}
#endif
	qp = qp_in;

	if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	if ((qp->next = ipq_hash[hash]) != NULL)
		qp->next->pprev = &qp->next;
	ipq_hash[hash] = qp;
	qp->pprev = &ipq_hash[hash];
	INIT_LIST_HEAD(&qp->lru_list);
	list_add_tail(&qp->lru_list, &ipq_lru_list);
	ip_frag_nqueues++;
	write_unlock(&ipfrag_lock);
	return qp;
}

/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
{
	struct ipq *qp;

	if ((qp = frag_alloc_queue()) == NULL)
		goto out_nomem;

	qp->protocol = iph->protocol;
	qp->last_in = 0;
	qp->id = iph->id;
	qp->saddr = iph->saddr;
	qp->daddr = iph->daddr;
	qp->user = user;
	qp->len = 0;
	qp->meat = 0;
	qp->fragments = NULL;
	qp->iif = 0;

	/* Initialize a timer for this entry. */
	init_timer(&qp->timer);
	qp->timer.data = (unsigned long) qp;	/* pointer to queue	*/
	qp->timer.function = ip_expire;		/* expire function	*/
	spin_lock_init(&qp->lock);
	atomic_set(&qp->refcnt, 1);

	return ip_frag_intern(hash, qp);

out_nomem:
	NETDEBUG(if (net_ratelimit()) printk(KERN_ERR "ip_frag_create: no memory left!\n"));
	return NULL;
}

/* Find the matching entry in the "incomplete datagrams" queue for this
 * IP datagram, creating a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
	__u16 id = iph->id;
	__u32 saddr = iph->saddr;
	__u32 daddr = iph->daddr;
	__u8 protocol = iph->protocol;
	unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
	struct ipq *qp;

	read_lock(&ipfrag_lock);
	for (qp = ipq_hash[hash]; qp; qp = qp->next) {
		if (qp->id == id &&
		    qp->saddr == saddr &&
		    qp->daddr == daddr &&
		    qp->protocol == protocol &&
		    qp->user == user) {
			atomic_inc(&qp->refcnt);
			read_unlock(&ipfrag_lock);
			return qp;
		}
	}
	read_unlock(&ipfrag_lock);

	return ip_frag_create(hash, iph, user);
}

/* Add new segment to existing queue. */
static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	int flags, offset;
	int ihl, end;

	if (qp->last_in & COMPLETE)
		goto err;

	offset = ntohs(skb->nh.iph->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = skb->nh.iph->ihl * 4;

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->len ||
		    ((qp->last_in & LAST_IN) && end != qp->len))
			goto err;
		qp->last_in |= LAST_IN;
		qp->len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->len) {
			/* Some bits beyond end -> corruption. */
			if (qp->last_in & LAST_IN)
				goto err;
			qp->len = end;
		}
	}
	if (end == offset)
		goto err;

	if (pskb_pull(skb, ihl) == NULL)
		goto err;
	if (pskb_trim(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one. Check for overlap with
	 * the preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

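	/* Now walk the fragments that follow: any that this one fully
	 * covers are unlinked and freed; a partial overlap is trimmed
	 * off the head of the existing fragment instead.
	 */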
	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->fragments = next;

			qp->meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->fragments = skb;

	if (skb->dev)
		qp->iif = skb->dev->ifindex;
	skb->dev = NULL;
	qp->stamp = skb->stamp;
	qp->meat += skb->len;
	atomic_add(skb->truesize, &ip_frag_mem);
	if (offset == 0)
		qp->last_in |= FIRST_IN;

	write_lock(&ipfrag_lock);
	list_move_tail(&qp->lru_list, &ipq_lru_list);
	write_unlock(&ipfrag_lock);

	return;

err:
	kfree_skb(skb);
}


/* Build a new IP datagram from all its fragments. */

static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
{
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->fragments;
	int len;
	int ihlen;

	ipq_kill(qp);

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG_CB(head)->offset == 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = head->nh.iph->ihl * 4;
	len = ihlen + qp->len;

	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split it into
	 * two chunks: the first with the data and paged part, and the
	 * second holding only fragments.
	 */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip_frag_mem);
	}

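	/* Chain the remaining fragments off the head skb's frag_list and
	 * fold their lengths and checksums into the head, uncharging each
	 * from ip_frag_mem as it leaves the queue.
	 */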
	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - head->nh.raw);
	atomic_sub(head->truesize, &ip_frag_mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_HW)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip_frag_mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->stamp = qp->stamp;

	iph = head->nh.iph;
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
	qp->fragments = NULL;
	return head;

out_nomem:
	NETDEBUG(if (net_ratelimit())
		 printk(KERN_ERR
			"IP: queue_glue: no memory for gluing queue %p\n",
			qp));
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO
		       "Oversized IP packet from %d.%d.%d.%d.\n",
		       NIPQUAD(qp->saddr));
out_fail:
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	return NULL;
}

/* Process an incoming IP datagram fragment. */
struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr *iph = skb->nh.iph;
	struct ipq *qp;
	struct net_device *dev;

	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
		ip_evictor();

	dev = skb->dev;

	/* Lookup (or create) queue header */
	if ((qp = ip_find(iph, user)) != NULL) {
		struct sk_buff *ret = NULL;

		spin_lock(&qp->lock);

		ip_frag_queue(qp, skb);

		if (qp->last_in == (FIRST_IN | LAST_IN) &&
		    qp->meat == qp->len)
			ret = ip_frag_reasm(qp, dev);

		spin_unlock(&qp->lock);
		ipq_put(qp, NULL);
		return ret;
	}

	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return NULL;
}

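/* ip_defrag() returns NULL while the datagram is still incomplete, and
 * the reassembled skb once the final fragment arrives (qp->meat, the
 * byte count actually received, equals qp->len). The 'user' tag keeps
 * queues from different defragmentation contexts separate, so e.g.
 * local delivery and netfilter never share a partially built datagram.
 */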
void ipfrag_init(void)
{
	ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages >> 7)) ^
				 (jiffies ^ (jiffies >> 6)));

	init_timer(&ipfrag_secret_timer);
	ipfrag_secret_timer.function = ipfrag_secret_rebuild;
	ipfrag_secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
	add_timer(&ipfrag_secret_timer);
}

EXPORT_SYMBOL(ip_defrag);