/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copy arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
{
        get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf)
{
        return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = sock_pipe_buf_release,
        .steal = sock_pipe_buf_steal,
        .get = sock_pipe_buf_get,
};

/**
 *      skb_panic - private function for out-of-line support
 *      @skb:   buffer
 *      @sz:    size
 *      @addr:  address
 *      @msg:   skb_over_panic or skb_under_panic
 *
 *      Out-of-line support for skb_put() and skb_push().
 *      Called via the wrapper skb_over_panic() or skb_under_panic().
 *      Keep out of line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
                      const char msg[])
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
         __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
                               unsigned long ip, bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;

        /*
         * Try a regular allocation, when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !(gfp_pfmemalloc_allowed(flags)))
                goto out;

        /* Try again but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);

out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;

        return obj;
}

/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 *      'private' fields and also do memory statistics to find all the
 *      [BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
        struct sk_buff *skb;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(skbuff_head_cache,
                                    gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->head = NULL;
        skb->truesize = sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);

        skb->mac_header = (typeof(skb->mac_header))~0U;
out:
        return skb;
}

/**
 *      __alloc_skb     -       allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *      @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *              instead of head cache and allocate a cloned (child) skb.
 *              If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *              allocations in case the data is required for writeback
 *      @node: numa node to allocate memory on
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of at least size bytes. The object has a reference count
 *      of one. The return is the buffer. On a failure the return is %NULL.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;

        cache = (flags & SKB_ALLOC_FCLONE)
                ? skbuff_fclone_cache : skbuff_head_cache;

        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);

        /* We do our best to align skb_shared_info on a separate cache
         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
         * Both skb->head and skb_shared_info are cache line aligned.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
        if (!data)
                goto nodata;
        /* kmalloc(size) might give us more room than requested.
         * Put skb_shared_info exactly at the end of allocated zone,
         * to allow max possible filling before reallocation.
         */
        size = SKB_WITH_OVERHEAD(ksize(data));
        prefetchw(data + size);

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);

                kmemcheck_annotate_bitfield(child, flags1);
                kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);

                child->fclone = SKB_FCLONE_UNAVAILABLE;
                child->pfmemalloc = pfmemalloc;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
EXPORT_SYMBOL(__alloc_skb);
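
/* Example (illustrative sketch only, not used by the stack): a typical
 * caller goes through the alloc_skb() wrapper around __alloc_skb(),
 * reserves its own headroom (none is built in, per the kernel-doc above),
 * then appends data with skb_put(). The function name and the payload
 * handling are assumptions made up for this sketch.
 */
static int __maybe_unused example_alloc_and_fill(const void *payload,
                                                 unsigned int len)
{
        struct sk_buff *skb;

        skb = alloc_skb(NET_SKB_PAD + len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, NET_SKB_PAD);           /* headroom for lower layers */
        memcpy(skb_put(skb, len), payload, len); /* consume tailroom */

        kfree_skb(skb);                          /* sketch only: just free it */
        return 0;
}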

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator.
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        unsigned int size = frag_size ? : ksize(data);

        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
        if (!skb)
                return NULL;

        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
        skb->head_frag = frag_size != 0;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        return skb;
}
EXPORT_SYMBOL(build_skb);
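
/* Example (illustrative sketch only): the RX pattern the kernel-doc above
 * describes. The driver sized its receive buffer with NET_SKB_PAD headroom
 * and skb_shared_info tailroom before DMA; after DMA it wraps the buffer in
 * an skb. example_build_rx_skb() is a name invented for this sketch, and
 * @data is assumed to come from the page allocator (so @frag_size != 0).
 */
static struct sk_buff *__maybe_unused
example_build_rx_skb(void *data, unsigned int frag_size, unsigned int len)
{
        struct sk_buff *skb = build_skb(data, frag_size);

        if (unlikely(!skb))
                return NULL;            /* caller still owns @data */

        skb_reserve(skb, NET_SKB_PAD);  /* the headroom reserved before DMA */
        skb_put(skb, len);              /* the bytes the NIC wrote */
        return skb;
}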

struct netdev_alloc_cache {
        struct page_frag frag;
        /* we maintain a pagecount bias, so that we don't dirty cache line
         * containing page->_count every time we allocate a fragment.
         */
        unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct netdev_alloc_cache *nc;
        void *data = NULL;
        int order;
        unsigned long flags;

        local_irq_save(flags);
        nc = &__get_cpu_var(netdev_alloc_cache);
        if (unlikely(!nc->frag.page)) {
refill:
                for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
                        gfp_t gfp = gfp_mask;

                        if (order)
                                gfp |= __GFP_COMP | __GFP_NOWARN;
                        nc->frag.page = alloc_pages(gfp, order);
                        if (likely(nc->frag.page))
                                break;
                        if (--order < 0)
                                goto end;
                }
                nc->frag.size = PAGE_SIZE << order;
recycle:
                atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
                nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
                nc->frag.offset = 0;
        }

        if (nc->frag.offset + fragsz > nc->frag.size) {
                /* avoid unnecessary locked operations if possible */
                if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
                    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
                        goto recycle;
                goto refill;
        }

        data = page_address(nc->frag.page) + nc->frag.offset;
        nc->frag.offset += fragsz;
        nc->pagecnt_bias--;
end:
        local_irq_restore(flags);
        return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
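
/* Example (illustrative sketch only): how a driver might size a receive
 * buffer taken from this per-cpu cache, leaving room for headroom and the
 * trailing skb_shared_info so the fragment can later feed build_skb().
 * example_refill_rx_buffer() is a name invented for this sketch.
 */
static void *__maybe_unused example_refill_rx_buffer(unsigned int buf_len)
{
        unsigned int fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /* NULL on failure; release with put_page(virt_to_head_page(data)) */
        return netdev_alloc_frag(fragsz);
}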

/**
 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                                   unsigned int length, gfp_t gfp_mask)
{
        struct sk_buff *skb = NULL;
        unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
                void *data;

                if (sk_memalloc_socks())
                        gfp_mask |= __GFP_MEMALLOC;

                data = __netdev_alloc_frag(fragsz, gfp_mask);

                if (likely(data)) {
                        skb = build_skb(data, fragsz);
                        if (unlikely(!skb))
                                put_page(virt_to_head_page(data));
                }
        } else {
                skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
                                  SKB_ALLOC_RX, NUMA_NO_NODE);
        }
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
        }
        return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
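
/* Example (illustrative sketch only): a copying RX path built on the
 * netdev_alloc_skb() wrapper, which calls __netdev_alloc_skb() with
 * GFP_ATOMIC. The NET_SKB_PAD headroom is already reserved on return.
 * example_netdev_rx() and the fixed ETH_P_IP protocol are assumptions
 * made up for this sketch.
 */
static void __maybe_unused example_netdev_rx(struct net_device *dev,
                                             const void *frame,
                                             unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len);

        if (unlikely(!skb))
                return;                         /* drop: out of memory */

        memcpy(skb_put(skb, len), frame, len);  /* copy the received frame */
        skb->protocol = htons(ETH_P_IP);        /* sketch: assume IPv4 */
        netif_rx(skb);                          /* hand packet to the stack */
}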

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
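
/* Example (illustrative sketch only): a paged-RX driver attaching a page
 * fragment to an skb without copying. The skb consumes the page reference
 * the caller holds, and @truesize reflects the full buffer the fragment was
 * carved from; the half-page buffer size here is an invented assumption.
 */
static void __maybe_unused example_add_page_frag(struct sk_buff *skb,
                                                 struct page *page,
                                                 int off, int len)
{
        /* first fragment; truesize covers the whole half-page RX buffer */
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE / 2);
}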

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
                          unsigned int truesize)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        skb_frag_size_add(frag, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
        kfree_skb_list(*listp);
        *listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
        if (skb->head_frag)
                put_page(virt_to_head_page(skb->head));
        else
                kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                               &skb_shinfo(skb)->dataref)) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                skb_frag_unref(skb, i);
                }

                /*
                 * If skb buf is from userspace, we need to notify the caller
                 * that the lower device DMA has finished;
                 */
                if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                        struct ubuf_info *uarg;

                        uarg = skb_shinfo(skb)->destructor_arg;
                        if (uarg->callback)
                                uarg->callback(uarg, true);
                }

                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);

                skb_free_head(skb);
        }
}

/*
 *      Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff *other;
        atomic_t *fclone_ref;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                break;

        case SKB_FCLONE_ORIG:
                fclone_ref = (atomic_t *) (skb + 2);
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, skb);
                break;

        case SKB_FCLONE_CLONE:
                fclone_ref = (atomic_t *) (skb + 1);
                other = skb - 1;

                /* The clone portion is available for
                 * fast-cloning again.
                 */
                skb->fclone = SKB_FCLONE_UNAVAILABLE;

                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
        }
}

static void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        if (likely(skb->head))
                skb_release_data(skb);
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
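
/* Example (illustrative sketch only): callers outside this file do not use
 * __kfree_skb() directly; they drop their reference with kfree_skb() (or
 * consume_skb() on non-error paths), which only frees the skb once the last
 * reference is gone. example_drop_ref() is a name invented for this sketch.
 */
static void __maybe_unused example_drop_ref(struct sk_buff *skb)
{
        skb_get(skb);           /* take an extra reference ...          */
        kfree_skb(skb);         /* ... this only decrements skb->users  */
        kfree_skb(skb);         /* last reference: the skb is freed     */
}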