[NETFILTER]: Add nfnetlink layer.
[deliverable/linux.git] / net / core / sock.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
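
/*
 * Worked example (the sizeof is an assumption for illustration only):
 * with a 240-byte struct sk_buff, _SK_MEM_OVERHEAD is 240 + 256 = 496
 * bytes, so SK_WMEM_MAX and SK_RMEM_MAX come to 496 * 256 = 126976
 * bytes - i.e. room for 256 worst-case packets on any platform.
 */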

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max = SK_WMEM_MAX;
__u32 sysctl_rmem_max = SK_RMEM_MAX;
__u32 sysctl_wmem_default = SK_WMEM_MAX;
__u32 sysctl_rmem_default = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max = sizeof(unsigned long)*(2*UIO_MAXIOV + 512);

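/*
 * Convert a user-supplied struct timeval into a jiffies timeout.
 * A zero timeval means "wait forever" (MAX_SCHEDULE_TIMEOUT); the
 * microsecond part is rounded up to the next jiffy.
 */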
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;

	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk)
{
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
	}
}


/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

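/*
 * Normally reached via sys_setsockopt() for SOL_SOCKET level options;
 * a typical userspace call that lands here looks like (sketch):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_BROADCAST, &one, sizeof(one));
 */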
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct sk_filter *filter;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else if (valbool)
			sock_set_flag(sk, SOCK_DBG);
		else
			sock_reset_flag(sk, SOCK_DBG);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		if (valbool)
			sock_set_flag(sk, SOCK_LOCALROUTE);
		else
			sock_reset_flag(sk, SOCK_LOCALROUTE);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this, BSD doesn't, and if you think
		   about it this is right.  Otherwise apps have to
		   play 'guess the biggest size' games.  RCVBUF/SNDBUF
		   are treated in BSD as hints. */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;

		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_RCVBUF:
		/* Don't error on this, BSD doesn't, and if you think
		   about it this is right.  Otherwise apps have to
		   play 'guess the biggest size' games.  RCVBUF/SNDBUF
		   are treated in BSD as hints. */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;

		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/* FIXME: is this lower bound the right one? */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
		if (valbool) {
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

#ifdef CONFIG_NETDEVICES
	case SO_BINDTODEVICE:
	{
		char devname[IFNAMSIZ];

		/* Sorry... */
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			break;
		}

		/* Bind this socket to a particular device like "eth0",
		 * as specified in the passed interface name. If the
		 * name is "" or the option length is zero the socket
		 * is not bound.
		 */

		if (!valbool) {
			sk->sk_bound_dev_if = 0;
		} else {
			if (optlen > IFNAMSIZ)
				optlen = IFNAMSIZ;
			if (copy_from_user(devname, optval, optlen)) {
				ret = -EFAULT;
				break;
			}

			/* Remove any cached route for this socket. */
			sk_dst_reset(sk);

			if (devname[0] == '\0') {
				sk->sk_bound_dev_if = 0;
			} else {
				struct net_device *dev = dev_get_by_name(devname);
				if (!dev) {
					ret = -ENODEV;
					break;
				}
				sk->sk_bound_dev_if = dev->ifindex;
				dev_put(dev);
			}
		}
		break;
	}
#endif

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		spin_lock_bh(&sk->sk_lock.slock);
		filter = sk->sk_filter;
		if (filter) {
			sk->sk_filter = NULL;
			spin_unlock_bh(&sk->sk_lock.slock);
			sk_filter_release(sk, filter);
			break;
		}
		spin_unlock_bh(&sk->sk_lock.slock);
		ret = -ENONET;
		break;

	/* We implement SO_SNDLOWAT and friends as
	   not settable (1003.1g 5.3). */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}


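/*
 * Counterpart of sock_setsockopt(): copy at most the user-supplied
 * length of the requested option value back to userspace and report
 * how much was actually written.
 */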
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP);
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec(sock, optval, optlen, len);

	default:
		return -ENOPROTOOPT;
	}
	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@zero_it: if we should zero the newly allocated sock
 */
struct sock *sk_alloc(int family, unsigned int __nocast priority,
		      struct proto *prot, int zero_it)
{
	struct sock *sk = NULL;
	kmem_cache_t *slab = prot->slab;

	if (slab != NULL)
		sk = kmem_cache_alloc(slab, priority);
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk) {
		if (zero_it) {
			memset(sk, 0, prot->obj_size);
			sk->sk_family = family;
			/*
			 * See comment in struct sock definition to understand
			 * why we need sk_prot_creator -acme
			 */
			sk->sk_prot = sk->sk_prot_creator = prot;
			sock_lock_init(sk);
		}

		if (security_sk_alloc(sk, family, priority)) {
			if (slab != NULL)
				kmem_cache_free(slab, sk);
			else
				kfree(sk);
			sk = NULL;
		} else
			__module_get(prot->owner);
	}
	return sk;
}

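/*
 * Release a sock: run its destructor, drop any attached filter, warn
 * about leaked option memory, then hand the storage back to the slab
 * (or kmalloc) and drop the protocol module reference.
 */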
void sk_free(struct sock *sk)
{
	struct sk_filter *filter;
	struct module *owner = sk->sk_prot_creator->owner;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = sk->sk_filter;
	if (filter) {
		sk_filter_release(sk, filter);
		sk->sk_filter = NULL;
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

	security_sk_free(sk);
	if (sk->sk_prot_creator->slab != NULL)
		kmem_cache_free(sk->sk_prot_creator->slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

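/*
 * Scale the default buffer sysctls to the machine's memory: boxes
 * with at most 4096 pages (16MB, assuming 4KB pages) get clamped
 * defaults, while boxes with at least 131072 pages get larger maxima.
 */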
void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

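/*
 * The owning inode's uid/ino are read under sk_callback_lock because
 * sk->sk_socket can be cleared from under us (e.g. by sock_orphan()).
 */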
int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     unsigned int __nocast priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     unsigned int __nocast priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

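/*
 * Typical caller pattern (a sketch, not from this file): option data
 * is charged to the socket and must be released with the same size,
 * e.g.
 *
 *	opt = sock_kmalloc(sk, len, GFP_KERNEL);
 *	...
 *	sock_kfree_s(sk, opt, len);
 */
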
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

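/*
 * Allocate a send skb with header_len bytes of linear space and
 * data_len bytes spread over page fragments, waiting (subject to the
 * send timeout) until enough write memory is available.
 */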
static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	unsigned int gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, sk->sk_allocation);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

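/*
 * Wait for the current socket lock owner to let go; called with
 * sk_lock.slock held, which is dropped around the schedule().
 */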
static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

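/*
 * Flush the backlog that accumulated while the socket was owned.  The
 * queue is detached first, so softirqs can keep queueing new packets
 * on a fresh backlog while the old one is processed.
 */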
static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}

EXPORT_SYMBOL(sk_wait_data);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

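/*
 * Protocols drop these into their struct proto_ops for calls they do
 * not support, e.g. (a sketch of typical usage):
 *
 *	.socketpair =	sock_no_socketpair,
 *	.mmap =		sock_no_mmap,
 */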
int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}

/*
 *	Default Socket Callbacks
 */

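/*
 * These all run under read_lock(&sk->sk_callback_lock) so the socket
 * cannot be detached from its wait queue while a wakeup is in flight.
 */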
static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 0, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 1, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	if (sk->sk_protinfo)
		kfree(sk->sk_protinfo);
}

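/* Signal the socket owner (SIGURG) that urgent data has arrived. */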
void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, 3, POLL_PRI);
}

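/*
 * Timer helpers that hold a sock reference while a timer is pending:
 * mod_timer() returning 0 means the timer was not already queued, so
 * take a reference; a successful del_timer() drops it again.
 */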
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}

EXPORT_SYMBOL(sk_stop_timer);

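/*
 * Initialise the protocol-independent part of a freshly allocated
 * sock and, when a struct socket is supplied, cross-link the two.
 */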
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk->sk_socket = sock;

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_sleep = &sock->wait;
		sock->sk = sk;
	} else
		sk->sk_sleep = NULL;

	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;

	sk->sk_peercred.pid = 0;
	sk->sk_peercred.uid = -1;
	sk->sk_peercred.gid = -1;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp.tv_sec = -1L;
	sk->sk_stamp.tv_usec = -1L;

	atomic_set(&sk->sk_refcnt, 1);
}

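/*
 * Process-context socket lock: sk_lock.slock protects the owner flag
 * and the backlog.  While a process owns the socket, softirq receive
 * paths queue packets to the backlog; release_sock() replays that
 * backlog before waking up the next waiter.
 */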
void fastcall lock_sock(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owner)
		__lock_sock(sk);
	sk->sk_lock.owner = (void *)1;
	spin_unlock_bh(&sk->sk_lock.slock);
}

EXPORT_SYMBOL(lock_sock);

void fastcall release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owner = NULL;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

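/*
 * SIOCGSTAMP support: lazily enable timestamping the first time it is
 * queried, and fall back to the current time if no packet has stamped
 * this socket yet.
 */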
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	if (sk->sk_stamp.tv_sec == -1)
		return -ENOENT;
	if (sk->sk_stamp.tv_sec == 0)
		do_gettimeofday(&sk->sk_stamp);
	return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
		-EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

void sock_enable_timestamp(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
	}
}
EXPORT_SYMBOL(sock_enable_timestamp);

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_getsockopt);

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}

EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_setsockopt);

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network still does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets anymore, but
	 * it is possible that some packets are in flight because some CPU
	 * runs the receiver and did a hash table lookup before we unhashed
	 * the socket. They will reach the receive queue and be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the socket %p delayed, c=%d\n",
		       sk, atomic_read(&sk->sk_refcnt));
#endif
	sock_put(sk);
}

EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

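/*
 * Register a protocol with the socket layer, optionally creating slab
 * caches for its socks and (if the protocol defines one) for its
 * request socks.
 */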
int proto_register(struct proto *prot, int alloc_slab)
{
	char *request_sock_slab_name;
	int rc = -ENOBUFS;

	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (request_sock_slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(request_sock_slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	write_unlock(&proto_list_lock);
	rc = 0;
out:
	return rc;
out_free_request_sock_slab_name:
	kfree(request_sock_slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
	goto out;
}

EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		const char *name = kmem_cache_name(prot->rsk_prot->slab);

		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(name);
		prot->rsk_prot->slab = NULL;
	}

	list_del(&prot->node);
	write_unlock(&proto_list_lock);
}

EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static inline struct proto *__proto_head(void)
{
	return list_entry(proto_list.next, struct proto, node);
}

static inline struct proto *proto_head(void)
{
	return list_empty(&proto_list) ? NULL : __proto_head();
}

static inline struct proto *proto_next(struct proto *proto)
{
	return proto->node.next == &proto_list ? NULL :
		list_entry(proto->node.next, struct proto, node);
}

static inline struct proto *proto_get_idx(loff_t pos)
{
	struct proto *proto;
	loff_t i = 0;

	list_for_each_entry(proto, &proto_list, node)
		if (i++ == pos)
			goto out;

	proto = NULL;
out:
	return proto;
}

static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&proto_list_lock);
	return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, v);
	return 0;
}

static struct seq_operations proto_seq_ops = {
	.start	= proto_seq_start,
	.next	= proto_seq_next,
	.stop	= proto_seq_stop,
	.show	= proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &proto_seq_ops);
}

static struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proto_init(void)
{
	/* register /proc/net/protocols */
	return proc_net_fops_create("protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_optmem_max);
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);
#endif