/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with a per-bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU-protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

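/* Illustrative note (not part of the original file): netlink multicast
 * groups are 1-based, so netlink_group_mask() maps group N to bit N-1 of
 * a 32-bit mask, and group 0 ("no group") to an empty mask:
 *
 *	netlink_group_mask(0) == 0x0
 *	netlink_group_mask(1) == 0x1
 *	netlink_group_mask(3) == 0x4
 */
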
int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found && nt->module)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

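/* Usage sketch (illustrative, not from the original file): a module such as
 * the nlmon driver registers a tap so that netlink traffic can be mirrored
 * to a packet-socket-visible device. "my_nlmon_dev" below is a hypothetical
 * ARPHRD_NETLINK net_device:
 *
 *	static struct netlink_tap my_tap = {
 *		.dev	= my_nlmon_dev,
 *		.module	= THIS_MODULE,
 *	};
 *
 *	err = netlink_add_tap(&my_tap);		// start mirroring
 *	...
 *	netlink_remove_tap(&my_tap);		// synchronize_net() waits
 *						// for in-flight RCU readers
 */
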
static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool closing, bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct sk_buff_head *queue;
	void **pg_vec = NULL;
	unsigned int order = 0;
	int err;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (!closing) {
		if (atomic_read(&nlk->mapped))
			return -EBUSY;
		if (atomic_read(&ring->pending))
			return -EBUSY;
	}

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	err = -EBUSY;
	mutex_lock(&nlk->pg_vec_lock);
	if (closing || atomic_read(&nlk->mapped) == 0) {
		err = 0;
		spin_lock_bh(&queue->lock);

		ring->frame_max		= req->nm_frame_nr - 1;
		ring->head		= 0;
		ring->frame_size	= req->nm_frame_size;
		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

		swap(ring->pg_vec_len, req->nm_block_nr);
		swap(ring->pg_vec_order, order);
		swap(ring->pg_vec, pg_vec);

		__skb_queue_purge(queue);
		spin_unlock_bh(&queue->lock);

		WARN_ON(atomic_read(&nlk->mapped));
	}
	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
	return err;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

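/* Illustrative arithmetic (not part of the original file): with
 * nm_block_size = 16384 and nm_frame_size = 4096, frames_per_block is 4,
 * so frame position 6 lives in block 6 / 4 = 1 at byte offset
 * (6 % 4) * 4096 = 8192, which is exactly what __netlink_lookup_frame()
 * computes. The head index wraps from frame_max back to 0, turning the
 * flat pg_vec into an implicit ring buffer.
 */
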
static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct sock_iocb *siocb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = siocb->scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, false);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, true);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

struct netlink_compare_arg {
	struct net *net;
	u32 portid;
};

static bool netlink_compare(void *ptr, void *arg)
{
	struct netlink_compare_arg *x = arg;
	struct sock *sk = ptr;

	return nlk_sk(sk)->portid == x->portid &&
	       net_eq(sock_net(sk), x->net);
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg = {
		.net = net,
		.portid = portid,
	};

	return rhashtable_lookup_compare(&table->hash, &portid,
					 &netlink_compare, &arg);
}

static bool __netlink_insert(struct netlink_table *table, struct sock *sk,
			     struct net *net)
{
	struct netlink_compare_arg arg = {
		.net = net,
		.portid = nlk_sk(sk)->portid,
	};

	return rhashtable_lookup_compare_insert(&table->hash,
						&nlk_sk(sk)->node,
						&netlink_compare, &arg);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err = -EADDRINUSE;

	lock_sock(sk);

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);
	if (__netlink_insert(table, sk, net))
		err = 0;
	else
		sock_put(sk);
err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	rcu_read_lock();
	if (__netlink_lookup(table, portid, net)) {
		/* Bind collision, search negative portid values. */
		portid = rover--;
		if (rover > -4097)
			rover = -4097;
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();

	err = netlink_insert(sk, net, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created, and that the sender of the
 * message has it too.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created, and that the sender of the
 * message has it too.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in all user namespaces when the
 * netlink socket was created, and that the sender of the message
 * has it too.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap over the network namespace of the
 * socket when the netlink socket was created, and that the sender
 * of the message has it too.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

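/* Usage sketch (illustrative, not from the original file): a message handler
 * typically gates privileged commands on the *sender* of the skb rather
 * than on current, e.g. (my_doit is a hypothetical rtnetlink-style handler):
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		...
 *	}
 *
 * Checking the skb matters because the message may be processed long after
 * the sending syscall returned, when current is no longer the sender.
 */
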
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
 out_unlock:
	netlink_table_ungrab();
	return err;
}

static void netlink_undo_bind(int group, unsigned long groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	unsigned long groups = nladdr->nl_groups;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid)
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;

	if (nlk->netlink_bind && groups) {
		int group;

		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	if (!nlk->portid) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(nlk->ngroups, groups, sk);
			return err;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else {
		skb->head_frag = 0;
		skb->destructor = netlink_skb_destructor;
	}

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

1726
3fbc2905
EB
1727static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1728 struct sock *ssk)
cd40b7d3
DL
1729{
1730 int ret;
1731 struct netlink_sock *nlk = nlk_sk(sk);
1732
1733 ret = -ECONNREFUSED;
1734 if (nlk->netlink_rcv != NULL) {
1735 ret = skb->len;
cf0a018a 1736 netlink_skb_set_owner_r(skb, sk);
e32123e5 1737 NETLINK_CB(skb).sk = ssk;
73bfd370 1738 netlink_deliver_tap_kernel(sk, ssk, skb);
cd40b7d3 1739 nlk->netlink_rcv(skb);
bfb253c9
ED
1740 consume_skb(skb);
1741 } else {
1742 kfree_skb(skb);
cd40b7d3 1743 }
cd40b7d3
DL
1744 sock_put(sk);
1745 return ret;
1746}
1747
1748int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
15e47304 1749 u32 portid, int nonblock)
1da177e4
LT
1750{
1751 struct sock *sk;
1752 int err;
1753 long timeo;
1754
1755 skb = netlink_trim(skb, gfp_any());
1756
1757 timeo = sock_sndtimeo(ssk, nonblock);
1758retry:
15e47304 1759 sk = netlink_getsockbyportid(ssk, portid);
1da177e4
LT
1760 if (IS_ERR(sk)) {
1761 kfree_skb(skb);
1762 return PTR_ERR(sk);
1763 }
cd40b7d3 1764 if (netlink_is_kernel(sk))
3fbc2905 1765 return netlink_unicast_kernel(sk, skb, ssk);
cd40b7d3 1766
b1153f29 1767 if (sk_filter(sk, skb)) {
84874607 1768 err = skb->len;
b1153f29
SH
1769 kfree_skb(skb);
1770 sock_put(sk);
1771 return err;
1772 }
1773
9457afee 1774 err = netlink_attachskb(sk, skb, &timeo, ssk);
1da177e4
LT
1775 if (err == 1)
1776 goto retry;
1777 if (err)
1778 return err;
1779
7ee015e0 1780 return netlink_sendskb(sk, skb);
1da177e4 1781}
6ac552fd 1782EXPORT_SYMBOL(netlink_unicast);
1da177e4 1783
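/* Usage sketch (illustrative, not from the original file): a kernel-side
 * protocol replying to a request it received as @skb. "my_kernel_sk" and
 * "payload_size" are hypothetical; message construction is elided.
 *
 *	u32 portid = NETLINK_CB(skb).portid;	// the sender's address
 *	struct sk_buff *reply = nlmsg_new(payload_size, GFP_KERNEL);
 *
 *	if (reply) {
 *		... fill in the reply message ...
 *		netlink_unicast(my_kernel_sk, reply, portid, MSG_DONTWAIT);
 *	}
 *
 * netlink_unicast() consumes the skb reference on both success and error,
 * so the caller must not free the reply afterwards.
 */
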
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	if (ring->frame_size - NL_MMAP_HDRLEN < size)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	/* check again under lock */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;
	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

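/* Usage sketch (illustrative, not from the original file): event producers
 * commonly skip building a notification when nobody has subscribed.
 * "my_kernel_sk", "MY_MCAST_GROUP" and build_event_skb() are hypothetical:
 *
 *	if (netlink_has_listeners(my_kernel_sk, MY_MCAST_GROUP)) {
 *		skb = build_event_skb(...);
 *		netlink_broadcast(my_kernel_sk, skb, 0, MY_MCAST_GROUP,
 *				  GFP_KERNEL);
 *	}
 *
 * The result is only a snapshot: a subscriber may appear or vanish right
 * after the check, which callers must tolerate.
 */
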
b57ef81f 1870static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1da177e4
LT
1871{
1872 struct netlink_sock *nlk = nlk_sk(sk);
1873
1874 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
cd967e05 1875 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
cf0a018a 1876 netlink_skb_set_owner_r(skb, sk);
4a7e7c2a 1877 __netlink_sendskb(sk, skb);
2c645800 1878 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1da177e4
LT
1879 }
1880 return -1;
1881}
1882
1883struct netlink_broadcast_data {
1884 struct sock *exclude_sk;
b4b51029 1885 struct net *net;
15e47304 1886 u32 portid;
1da177e4
LT
1887 u32 group;
1888 int failure;
ff491a73 1889 int delivery_failure;
1da177e4
LT
1890 int congested;
1891 int delivered;
7d877f3b 1892 gfp_t allocation;
1da177e4 1893 struct sk_buff *skb, *skb2;
910a7e90
EB
1894 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1895 void *tx_data;
1da177e4
LT
1896};
1897
46c9521f
RR
1898static void do_one_broadcast(struct sock *sk,
1899 struct netlink_broadcast_data *p)
1da177e4
LT
1900{
1901 struct netlink_sock *nlk = nlk_sk(sk);
1902 int val;
1903
1904 if (p->exclude_sk == sk)
46c9521f 1905 return;
1da177e4 1906
15e47304 1907 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 1908 !test_bit(p->group - 1, nlk->groups))
46c9521f 1909 return;
1da177e4 1910
878628fb 1911 if (!net_eq(sock_net(sk), p->net))
46c9521f 1912 return;
b4b51029 1913
1914 if (p->failure) {
1915 netlink_overrun(sk);
46c9521f 1916 return;
1917 }
1918
1919 sock_hold(sk);
1920 if (p->skb2 == NULL) {
68acc024 1921 if (skb_shared(p->skb)) {
1922 p->skb2 = skb_clone(p->skb, p->allocation);
1923 } else {
1924 p->skb2 = skb_get(p->skb);
1925 /*
1926 * skb ownership may have been set when
1927 * delivered to a previous socket.
1928 */
1929 skb_orphan(p->skb2);
1930 }
1931 }
1932 if (p->skb2 == NULL) {
1933 netlink_overrun(sk);
1934 /* Clone failed. Notify ALL listeners. */
1935 p->failure = 1;
1936 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1937 p->delivery_failure = 1;
1938 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1939 kfree_skb(p->skb2);
1940 p->skb2 = NULL;
1941 } else if (sk_filter(sk, p->skb2)) {
1942 kfree_skb(p->skb2);
1943 p->skb2 = NULL;
1944 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1945 netlink_overrun(sk);
1946 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1947 p->delivery_failure = 1;
1948 } else {
1949 p->congested |= val;
1950 p->delivered = 1;
1951 p->skb2 = NULL;
1952 }
1953 sock_put(sk);
1954}
1955
15e47304 1956int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1957 u32 group, gfp_t allocation,
1958 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1959 void *filter_data)
1da177e4 1960{
3b1e0a65 1961 struct net *net = sock_net(ssk);
1da177e4 1962 struct netlink_broadcast_data info;
1963 struct sock *sk;
1964
1965 skb = netlink_trim(skb, allocation);
1966
1967 info.exclude_sk = ssk;
b4b51029 1968 info.net = net;
15e47304 1969 info.portid = portid;
1970 info.group = group;
1971 info.failure = 0;
ff491a73 1972 info.delivery_failure = 0;
1973 info.congested = 0;
1974 info.delivered = 0;
1975 info.allocation = allocation;
1976 info.skb = skb;
1977 info.skb2 = NULL;
1978 info.tx_filter = filter;
1979 info.tx_data = filter_data;
1980
 1981 /* While we sleep in clone, do not allow the socket list to change */
1982
1983 netlink_lock_table();
1984
b67bfe0d 1985 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1986 do_one_broadcast(sk, &info);
1987
70d4bf6d 1988 consume_skb(skb);
aa1c6a6f 1989
1990 netlink_unlock_table();
1991
1992 if (info.delivery_failure) {
1993 kfree_skb(info.skb2);
ff491a73 1994 return -ENOBUFS;
1995 }
1996 consume_skb(info.skb2);
ff491a73 1997
1998 if (info.delivered) {
1999 if (info.congested && (allocation & __GFP_WAIT))
2000 yield();
2001 return 0;
2002 }
2003 return -ESRCH;
2004}
2005EXPORT_SYMBOL(netlink_broadcast_filtered);
2006
15e47304 2007int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
2008 u32 group, gfp_t allocation)
2009{
15e47304 2010 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
2011 NULL, NULL);
2012}
6ac552fd 2013EXPORT_SYMBOL(netlink_broadcast);
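/* Example (editor's sketch, hypothetical names): netlink_broadcast()
 * consumes the skb, and -ESRCH only means nobody is currently listening,
 * so most callers do not treat it as a failure.
 */
#if 0
static int my_broadcast_event(struct sock *my_nlsk, struct sk_buff *skb)
{
	int err = netlink_broadcast(my_nlsk, skb, 0, MY_GRP_EVENT,
				    GFP_KERNEL);

	return err == -ESRCH ? 0 : err;
}
#endif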
2014
2015struct netlink_set_err_data {
2016 struct sock *exclude_sk;
15e47304 2017 u32 portid;
2018 u32 group;
2019 int code;
2020};
2021
b57ef81f 2022static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
2023{
2024 struct netlink_sock *nlk = nlk_sk(sk);
1a50307b 2025 int ret = 0;
2026
2027 if (sk == p->exclude_sk)
2028 goto out;
2029
09ad9bc7 2030 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
2031 goto out;
2032
15e47304 2033 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 2034 !test_bit(p->group - 1, nlk->groups))
2035 goto out;
2036
2037 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2038 ret = 1;
2039 goto out;
2040 }
2041
2042 sk->sk_err = p->code;
2043 sk->sk_error_report(sk);
2044out:
1a50307b 2045 return ret;
2046}
2047
2048/**
2049 * netlink_set_err - report error to broadcast listeners
2050 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
15e47304 2051 * @portid: the PORTID of a process that we want to skip (if any)
840e93f2 2052 * @group: the broadcast group that will notice the error
4843b93c 2053 * @code: error code, must be negative (as usual in kernelspace)
2054 *
2055 * This function returns the number of broadcast listeners that have set the
2056 * NETLINK_RECV_NO_ENOBUFS socket option.
4843b93c 2057 */
15e47304 2058int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
2059{
2060 struct netlink_set_err_data info;
1da177e4 2061 struct sock *sk;
1a50307b 2062 int ret = 0;
2063
2064 info.exclude_sk = ssk;
15e47304 2065 info.portid = portid;
1da177e4 2066 info.group = group;
2067 /* sk->sk_err wants a positive error value */
2068 info.code = -code;
2069
2070 read_lock(&nl_table_lock);
2071
b67bfe0d 2072 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1a50307b 2073 ret += do_one_set_err(sk, &info);
2074
2075 read_unlock(&nl_table_lock);
1a50307b 2076 return ret;
1da177e4 2077}
dd5b6ce6 2078EXPORT_SYMBOL(netlink_set_err);
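/* Example (editor's sketch, hypothetical names): following the kernel-doc
 * above, the error is passed in negative; the return value counts listeners
 * that opted out of ENOBUFS reporting via NETLINK_NO_ENOBUFS.
 */
#if 0
static void my_report_loss(struct sock *my_nlsk)
{
	/* portid 0: do not skip any listener */
	netlink_set_err(my_nlsk, 0, MY_GRP_EVENT, -ENOBUFS);
}
#endif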
1da177e4 2079
2080/* must be called with netlink table grabbed */
2081static void netlink_update_socket_mc(struct netlink_sock *nlk,
2082 unsigned int group,
2083 int is_new)
2084{
2085 int old, new = !!is_new, subscriptions;
2086
2087 old = test_bit(group - 1, nlk->groups);
2088 subscriptions = nlk->subscriptions - old + new;
2089 if (new)
2090 __set_bit(group - 1, nlk->groups);
2091 else
2092 __clear_bit(group - 1, nlk->groups);
2093 netlink_update_subscriptions(&nlk->sk, subscriptions);
2094 netlink_update_listeners(&nlk->sk);
2095}
2096
9a4595bc 2097static int netlink_setsockopt(struct socket *sock, int level, int optname,
b7058842 2098 char __user *optval, unsigned int optlen)
2099{
2100 struct sock *sk = sock->sk;
2101 struct netlink_sock *nlk = nlk_sk(sk);
2102 unsigned int val = 0;
2103 int err;
2104
2105 if (level != SOL_NETLINK)
2106 return -ENOPROTOOPT;
2107
2108 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2109 optlen >= sizeof(int) &&
eb496534 2110 get_user(val, (unsigned int __user *)optval))
2111 return -EFAULT;
2112
2113 switch (optname) {
2114 case NETLINK_PKTINFO:
2115 if (val)
2116 nlk->flags |= NETLINK_RECV_PKTINFO;
2117 else
2118 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2119 err = 0;
2120 break;
2121 case NETLINK_ADD_MEMBERSHIP:
2122 case NETLINK_DROP_MEMBERSHIP: {
5187cd05 2123 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
9a4595bc 2124 return -EPERM;
2125 err = netlink_realloc_groups(sk);
2126 if (err)
2127 return err;
2128 if (!val || val - 1 >= nlk->ngroups)
2129 return -EINVAL;
7774d5e0 2130 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
023e2cfa 2131 err = nlk->netlink_bind(sock_net(sk), val);
2132 if (err)
2133 return err;
2134 }
9a4595bc 2135 netlink_table_grab();
2136 netlink_update_socket_mc(nlk, val,
2137 optname == NETLINK_ADD_MEMBERSHIP);
9a4595bc 2138 netlink_table_ungrab();
7774d5e0 2139 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
023e2cfa 2140 nlk->netlink_unbind(sock_net(sk), val);
03292745 2141
2142 err = 0;
2143 break;
2144 }
2145 case NETLINK_BROADCAST_ERROR:
2146 if (val)
2147 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2148 else
2149 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2150 err = 0;
2151 break;
2152 case NETLINK_NO_ENOBUFS:
2153 if (val) {
2154 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
cd967e05 2155 clear_bit(NETLINK_CONGESTED, &nlk->state);
38938bfe 2156 wake_up_interruptible(&nlk->wait);
658cb354 2157 } else {
38938bfe 2158 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
658cb354 2159 }
2160 err = 0;
2161 break;
2162#ifdef CONFIG_NETLINK_MMAP
2163 case NETLINK_RX_RING:
2164 case NETLINK_TX_RING: {
2165 struct nl_mmap_req req;
2166
2167 /* Rings might consume more memory than queue limits, require
2168 * CAP_NET_ADMIN.
2169 */
2170 if (!capable(CAP_NET_ADMIN))
2171 return -EPERM;
2172 if (optlen < sizeof(req))
2173 return -EINVAL;
2174 if (copy_from_user(&req, optval, sizeof(req)))
2175 return -EFAULT;
2176 err = netlink_set_ring(sk, &req, false,
2177 optname == NETLINK_TX_RING);
2178 break;
2179 }
2180#endif /* CONFIG_NETLINK_MMAP */
2181 default:
2182 err = -ENOPROTOOPT;
2183 }
2184 return err;
2185}
2186
2187static int netlink_getsockopt(struct socket *sock, int level, int optname,
746fac4d 2188 char __user *optval, int __user *optlen)
2189{
2190 struct sock *sk = sock->sk;
2191 struct netlink_sock *nlk = nlk_sk(sk);
2192 int len, val, err;
2193
2194 if (level != SOL_NETLINK)
2195 return -ENOPROTOOPT;
2196
2197 if (get_user(len, optlen))
2198 return -EFAULT;
2199 if (len < 0)
2200 return -EINVAL;
2201
2202 switch (optname) {
2203 case NETLINK_PKTINFO:
2204 if (len < sizeof(int))
2205 return -EINVAL;
2206 len = sizeof(int);
2207 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
2208 if (put_user(len, optlen) ||
2209 put_user(val, optval))
2210 return -EFAULT;
2211 err = 0;
2212 break;
2213 case NETLINK_BROADCAST_ERROR:
2214 if (len < sizeof(int))
2215 return -EINVAL;
2216 len = sizeof(int);
2217 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2218 if (put_user(len, optlen) ||
2219 put_user(val, optval))
2220 return -EFAULT;
2221 err = 0;
2222 break;
2223 case NETLINK_NO_ENOBUFS:
2224 if (len < sizeof(int))
2225 return -EINVAL;
2226 len = sizeof(int);
2227 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2228 if (put_user(len, optlen) ||
2229 put_user(val, optval))
2230 return -EFAULT;
2231 err = 0;
2232 break;
2233 default:
2234 err = -ENOPROTOOPT;
2235 }
2236 return err;
2237}
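/* Userspace view (editor's illustrative sketch, not part of the original
 * file): the options above are driven from applications through
 * setsockopt(SOL_NETLINK, ...); MY_GRP_EVENT is a hypothetical group.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
 *	unsigned int grp = MY_GRP_EVENT, on = 1;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));
 */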
2238
2239static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2240{
2241 struct nl_pktinfo info;
2242
2243 info.group = NETLINK_CB(skb).dst_group;
2244 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2245}
2246
2247static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2248 struct msghdr *msg, size_t len)
2249{
2250 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2251 struct sock *sk = sock->sk;
2252 struct netlink_sock *nlk = nlk_sk(sk);
342dfc30 2253 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
15e47304 2254 u32 dst_portid;
d629b836 2255 u32 dst_group;
2256 struct sk_buff *skb;
2257 int err;
2258 struct scm_cookie scm;
2d7a85f4 2259 u32 netlink_skb_flags = 0;
2260
2261 if (msg->msg_flags&MSG_OOB)
2262 return -EOPNOTSUPP;
2263
16e57262 2264 if (NULL == siocb->scm)
1da177e4 2265 siocb->scm = &scm;
16e57262 2266
e0e3cea4 2267 err = scm_send(sock, msg, siocb->scm, true);
2268 if (err < 0)
2269 return err;
2270
2271 if (msg->msg_namelen) {
b47030c7 2272 err = -EINVAL;
1da177e4 2273 if (addr->nl_family != AF_NETLINK)
b47030c7 2274 goto out;
15e47304 2275 dst_portid = addr->nl_pid;
d629b836 2276 dst_group = ffs(addr->nl_groups);
b47030c7 2277 err = -EPERM;
15e47304 2278 if ((dst_group || dst_portid) &&
5187cd05 2279 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
b47030c7 2280 goto out;
2d7a85f4 2281 netlink_skb_flags |= NETLINK_SKB_DST;
1da177e4 2282 } else {
15e47304 2283 dst_portid = nlk->dst_portid;
d629b836 2284 dst_group = nlk->dst_group;
2285 }
2286
15e47304 2287 if (!nlk->portid) {
2288 err = netlink_autobind(sock);
2289 if (err)
2290 goto out;
2291 }
2292
5fd96123 2293 if (netlink_tx_is_mmaped(sk) &&
c0371da6 2294 msg->msg_iter.iov->iov_base == NULL) {
2295 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2296 siocb);
2297 goto out;
2298 }
2299
2300 err = -EMSGSIZE;
2301 if (len > sk->sk_sndbuf - 32)
2302 goto out;
2303 err = -ENOBUFS;
3a36515f 2304 skb = netlink_alloc_large_skb(len, dst_group);
6ac552fd 2305 if (skb == NULL)
2306 goto out;
2307
15e47304 2308 NETLINK_CB(skb).portid = nlk->portid;
d629b836 2309 NETLINK_CB(skb).dst_group = dst_group;
dbe9a417 2310 NETLINK_CB(skb).creds = siocb->scm->creds;
2d7a85f4 2311 NETLINK_CB(skb).flags = netlink_skb_flags;
1da177e4 2312
1da177e4 2313 err = -EFAULT;
6ce8e9ce 2314 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
2315 kfree_skb(skb);
2316 goto out;
2317 }
2318
2319 err = security_netlink_send(sk, skb);
2320 if (err) {
2321 kfree_skb(skb);
2322 goto out;
2323 }
2324
d629b836 2325 if (dst_group) {
1da177e4 2326 atomic_inc(&skb->users);
15e47304 2327 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1da177e4 2328 }
15e47304 2329 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
2330
2331out:
b47030c7 2332 scm_destroy(siocb->scm);
2333 return err;
2334}
2335
2336static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2337 struct msghdr *msg, size_t len,
2338 int flags)
2339{
2340 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2341 struct scm_cookie scm;
2342 struct sock *sk = sock->sk;
2343 struct netlink_sock *nlk = nlk_sk(sk);
2344 int noblock = flags&MSG_DONTWAIT;
2345 size_t copied;
68d6ac6d 2346 struct sk_buff *skb, *data_skb;
b44d211e 2347 int err, ret;
2348
2349 if (flags&MSG_OOB)
2350 return -EOPNOTSUPP;
2351
2352 copied = 0;
2353
2354 skb = skb_recv_datagram(sk, flags, noblock, &err);
2355 if (skb == NULL)
2356 goto out;
2357
2358 data_skb = skb;
2359
2360#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2361 if (unlikely(skb_shinfo(skb)->frag_list)) {
1dacc76d 2362 /*
2363 * If this skb has a frag_list, then here that means that we
2364 * will have to use the frag_list skb's data for compat tasks
2365 * and the regular skb's data for normal (non-compat) tasks.
1dacc76d 2366 *
2367 * If we need to send the compat skb, assign it to the
2368 * 'data_skb' variable so that it will be used below for data
2369 * copying. We keep 'skb' for everything else, including
2370 * freeing both later.
1dacc76d 2371 */
2372 if (flags & MSG_CMSG_COMPAT)
2373 data_skb = skb_shinfo(skb)->frag_list;
2374 }
2375#endif
2376
2377 /* Record the max length of recvmsg() calls for future allocations */
2378 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2379 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2380 16384);
2381
68d6ac6d 2382 copied = data_skb->len;
2383 if (len < copied) {
2384 msg->msg_flags |= MSG_TRUNC;
2385 copied = len;
2386 }
2387
68d6ac6d 2388 skb_reset_transport_header(data_skb);
51f3d02b 2389 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
2390
2391 if (msg->msg_name) {
342dfc30 2392 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2393 addr->nl_family = AF_NETLINK;
2394 addr->nl_pad = 0;
15e47304 2395 addr->nl_pid = NETLINK_CB(skb).portid;
d629b836 2396 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
2397 msg->msg_namelen = sizeof(*addr);
2398 }
2399
2400 if (nlk->flags & NETLINK_RECV_PKTINFO)
2401 netlink_cmsg_recv_pktinfo(msg, skb);
2402
2403 if (NULL == siocb->scm) {
2404 memset(&scm, 0, sizeof(scm));
2405 siocb->scm = &scm;
2406 }
2407 siocb->scm->creds = *NETLINK_CREDS(skb);
188ccb55 2408 if (flags & MSG_TRUNC)
68d6ac6d 2409 copied = data_skb->len;
daa3766e 2410
2411 skb_free_datagram(sk, skb);
2412
2413 if (nlk->cb_running &&
2414 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2415 ret = netlink_dump(sk);
2416 if (ret) {
ac30ef83 2417 sk->sk_err = -ret;
2418 sk->sk_error_report(sk);
2419 }
2420 }
2421
2422 scm_recv(sock, msg, siocb->scm, flags);
2423out:
2424 netlink_rcv_wake(sk);
2425 return err ? : copied;
2426}
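/* Userspace view (editor's sketch): the sendmsg/recvmsg pair above is what
 * services plain socket calls; a zeroed sockaddr_nl (nl_pid 0) addresses
 * the kernel, while nl_groups selects multicast destinations.
 *
 *	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
 *	char buf[4096];
 *
 *	sendto(fd, req, req_len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *	recv(fd, buf, sizeof(buf), 0);
 */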
2427
676d2369 2428static void netlink_data_ready(struct sock *sk)
1da177e4 2429{
cd40b7d3 2430 BUG();
2431}
2432
2433/*
746fac4d 2434 * We export these functions to other modules. They provide a
2435 * complete set of kernel non-blocking support for message
2436 * queueing.
2437 */
2438
2439struct sock *
2440__netlink_kernel_create(struct net *net, int unit, struct module *module,
2441 struct netlink_kernel_cfg *cfg)
2442{
2443 struct socket *sock;
2444 struct sock *sk;
77247bbb 2445 struct netlink_sock *nlk;
5c398dc8 2446 struct listeners *listeners = NULL;
2447 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2448 unsigned int groups;
1da177e4 2449
fab2caf6 2450 BUG_ON(!nl_table);
1da177e4 2451
6ac552fd 2452 if (unit < 0 || unit >= MAX_LINKS)
2453 return NULL;
2454
2455 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2456 return NULL;
2457
2458 /*
2459 * We have to just have a reference on the net from sk, but don't
2460 * get_net it. Besides, we cannot get and then put the net here.
 2461 * So we create one inside init_net and then move it to net.
2462 */
2463
2464 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2465 goto out_sock_release_nosk;
2466
2467 sk = sock->sk;
edf02087 2468 sk_change_net(sk, net);
4fdb3bb7 2469
a31f2d17 2470 if (!cfg || cfg->groups < 32)
4277a083 2471 groups = 32;
2472 else
2473 groups = cfg->groups;
4277a083 2474
5c398dc8 2475 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2476 if (!listeners)
2477 goto out_sock_release;
2478
1da177e4 2479 sk->sk_data_ready = netlink_data_ready;
2480 if (cfg && cfg->input)
2481 nlk_sk(sk)->netlink_rcv = cfg->input;
1da177e4 2482
b4b51029 2483 if (netlink_insert(sk, net, 0))
77247bbb 2484 goto out_sock_release;
4fdb3bb7 2485
2486 nlk = nlk_sk(sk);
2487 nlk->flags |= NETLINK_KERNEL_SOCKET;
4fdb3bb7 2488
4fdb3bb7 2489 netlink_table_grab();
2490 if (!nl_table[unit].registered) {
2491 nl_table[unit].groups = groups;
5c398dc8 2492 rcu_assign_pointer(nl_table[unit].listeners, listeners);
2493 nl_table[unit].cb_mutex = cb_mutex;
2494 nl_table[unit].module = module;
2495 if (cfg) {
2496 nl_table[unit].bind = cfg->bind;
6251edd9 2497 nl_table[unit].unbind = cfg->unbind;
9785e10a 2498 nl_table[unit].flags = cfg->flags;
2499 if (cfg->compare)
2500 nl_table[unit].compare = cfg->compare;
9785e10a 2501 }
b4b51029 2502 nl_table[unit].registered = 1;
2503 } else {
2504 kfree(listeners);
869e58f8 2505 nl_table[unit].registered++;
b4b51029 2506 }
4fdb3bb7 2507 netlink_table_ungrab();
2508 return sk;
2509
4fdb3bb7 2510out_sock_release:
4277a083 2511 kfree(listeners);
9dfbec1f 2512 netlink_kernel_release(sk);
2513 return NULL;
2514
2515out_sock_release_nosk:
4fdb3bb7 2516 sock_release(sock);
77247bbb 2517 return NULL;
1da177e4 2518}
9f00d977 2519EXPORT_SYMBOL(__netlink_kernel_create);
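/* Example (editor's illustrative sketch): callers normally use the
 * netlink_kernel_create() wrapper from <linux/netlink.h>, which passes
 * THIS_MODULE to the function above; my_proto_input() and the choice of
 * NETLINK_USERSOCK as a protocol are placeholders.
 */
#if 0
static void my_proto_input(struct sk_buff *skb)
{
	/* called for every message userspace sends to this kernel socket */
}

static struct sock *my_create_kernel_sock(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= my_proto_input,
	};

	return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
}
#endif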
2520
2521void
2522netlink_kernel_release(struct sock *sk)
2523{
edf02087 2524 sk_release_kernel(sk);
2525}
2526EXPORT_SYMBOL(netlink_kernel_release);
2527
d136f1bd 2528int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
b4ff4f04 2529{
5c398dc8 2530 struct listeners *new, *old;
b4ff4f04 2531 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2532
2533 if (groups < 32)
2534 groups = 32;
2535
b4ff4f04 2536 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2537 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2538 if (!new)
d136f1bd 2539 return -ENOMEM;
6d772ac5 2540 old = nl_deref_protected(tbl->listeners);
2541 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2542 rcu_assign_pointer(tbl->listeners, new);
2543
37b6b935 2544 kfree_rcu(old, rcu);
2545 }
2546 tbl->groups = groups;
2547
2548 return 0;
2549}
2550
2551/**
2552 * netlink_change_ngroups - change number of multicast groups
2553 *
2554 * This changes the number of multicast groups that are available
2555 * on a certain netlink family. Note that it is not possible to
2556 * change the number of groups to below 32. Also note that it does
2557 * not implicitly call netlink_clear_multicast_users() when the
2558 * number of groups is reduced.
2559 *
2560 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2561 * @groups: The new number of groups.
2562 */
2563int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2564{
2565 int err;
2566
2567 netlink_table_grab();
2568 err = __netlink_change_ngroups(sk, groups);
b4ff4f04 2569 netlink_table_ungrab();
d136f1bd 2570
2571 return err;
2572}
b4ff4f04 2573
2574void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2575{
2576 struct sock *sk;
2577 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2578
b67bfe0d 2579 sk_for_each_bound(sk, &tbl->mc_list)
2580 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2581}
2582
a46621a3 2583struct nlmsghdr *
15e47304 2584__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2585{
2586 struct nlmsghdr *nlh;
573ce260 2587 int size = nlmsg_msg_size(len);
a46621a3 2588
23b45672 2589 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2590 nlh->nlmsg_type = type;
2591 nlh->nlmsg_len = size;
2592 nlh->nlmsg_flags = flags;
15e47304 2593 nlh->nlmsg_pid = portid;
2594 nlh->nlmsg_seq = seq;
2595 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
573ce260 2596 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2597 return nlh;
2598}
2599EXPORT_SYMBOL(__nlmsg_put);
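/* Example (editor's sketch): nlmsg_new()/nlmsg_put() from <net/netlink.h>
 * are the usual front ends to __nlmsg_put(); MY_MSG_TYPE and the fixed
 * 64-byte payload are hypothetical.
 */
#if 0
static struct sk_buff *my_build_msg(u32 portid, u32 seq)
{
	struct sk_buff *skb = nlmsg_new(64, GFP_KERNEL);
	struct nlmsghdr *nlh;

	if (!skb)
		return NULL;
	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 64, 0);
	if (!nlh) {		/* insufficient tailroom */
		nlmsg_free(skb);
		return NULL;
	}
	memset(nlmsg_data(nlh), 0, 64);
	return skb;
}
#endif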
2600
2601/*
2602 * It looks a bit ugly.
 2603 * It would be better to create a kernel thread.
2604 */
2605
2606static int netlink_dump(struct sock *sk)
2607{
2608 struct netlink_sock *nlk = nlk_sk(sk);
2609 struct netlink_callback *cb;
c7ac8679 2610 struct sk_buff *skb = NULL;
1da177e4 2611 struct nlmsghdr *nlh;
bf8b79e4 2612 int len, err = -ENOBUFS;
c7ac8679 2613 int alloc_size;
1da177e4 2614
af65bdfc 2615 mutex_lock(nlk->cb_mutex);
16b304f3 2616 if (!nlk->cb_running) {
2617 err = -EINVAL;
2618 goto errout_skb;
2619 }
2620
16b304f3 2621 cb = &nlk->cb;
2622 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2623
2624 if (!netlink_rx_is_mmaped(sk) &&
2625 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2626 goto errout_skb;
2627
2628 /* NLMSG_GOODSIZE is small to avoid high order allocations being
 2629 * required, but it makes sense to _attempt_ a 16K allocation
 2630 * to reduce the number of system calls on dump operations, if the
 2631 * user ever provided a big enough buffer.
2632 */
2633 if (alloc_size < nlk->max_recvmsg_len) {
2634 skb = netlink_alloc_skb(sk,
2635 nlk->max_recvmsg_len,
2636 nlk->portid,
2637 GFP_KERNEL |
2638 __GFP_NOWARN |
2639 __GFP_NORETRY);
2640 /* available room should be exact amount to avoid MSG_TRUNC */
2641 if (skb)
2642 skb_reserve(skb, skb_tailroom(skb) -
2643 nlk->max_recvmsg_len);
2644 }
2645 if (!skb)
2646 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2647 GFP_KERNEL);
c7ac8679 2648 if (!skb)
c63d6ea3 2649 goto errout_skb;
f9c22888 2650 netlink_skb_set_owner_r(skb, sk);
c7ac8679 2651
2652 len = cb->dump(skb, cb);
2653
2654 if (len > 0) {
af65bdfc 2655 mutex_unlock(nlk->cb_mutex);
2656
2657 if (sk_filter(sk, skb))
2658 kfree_skb(skb);
2659 else
2660 __netlink_sendskb(sk, skb);
2661 return 0;
2662 }
2663
2664 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2665 if (!nlh)
2666 goto errout_skb;
2667
2668 nl_dump_check_consistent(cb, nlh);
2669
2670 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2671
2672 if (sk_filter(sk, skb))
2673 kfree_skb(skb);
2674 else
2675 __netlink_sendskb(sk, skb);
1da177e4 2676
2677 if (cb->done)
2678 cb->done(cb);
1da177e4 2679
2680 nlk->cb_running = false;
2681 mutex_unlock(nlk->cb_mutex);
6dc878a8 2682 module_put(cb->module);
16b304f3 2683 consume_skb(cb->skb);
1da177e4 2684 return 0;
1797754e 2685
bf8b79e4 2686errout_skb:
af65bdfc 2687 mutex_unlock(nlk->cb_mutex);
bf8b79e4 2688 kfree_skb(skb);
bf8b79e4 2689 return err;
2690}
2691
2692int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2693 const struct nlmsghdr *nlh,
2694 struct netlink_dump_control *control)
2695{
2696 struct netlink_callback *cb;
2697 struct sock *sk;
2698 struct netlink_sock *nlk;
b44d211e 2699 int ret;
1da177e4 2700
2701 /* Memory mapped dump requests need to be copied to avoid looping
 2702 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2703 * a reference to the skb.
2704 */
2705 if (netlink_skb_is_mmaped(skb)) {
2706 skb = skb_copy(skb, GFP_KERNEL);
16b304f3 2707 if (skb == NULL)
f9c22888 2708 return -ENOBUFS;
2709 } else
2710 atomic_inc(&skb->users);
2711
15e47304 2712 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1da177e4 2713 if (sk == NULL) {
2714 ret = -ECONNREFUSED;
2715 goto error_free;
1da177e4 2716 }
6dc878a8 2717
16b304f3 2718 nlk = nlk_sk(sk);
af65bdfc 2719 mutex_lock(nlk->cb_mutex);
6dc878a8 2720 /* A dump is in progress... */
16b304f3 2721 if (nlk->cb_running) {
6dc878a8 2722 ret = -EBUSY;
16b304f3 2723 goto error_unlock;
1da177e4 2724 }
6dc878a8 2725 /* add a reference to the module which cb->dump belongs to */
16b304f3 2726 if (!try_module_get(control->module)) {
6dc878a8 2727 ret = -EPROTONOSUPPORT;
16b304f3 2728 goto error_unlock;
2729 }
2730
2731 cb = &nlk->cb;
2732 memset(cb, 0, sizeof(*cb));
2733 cb->dump = control->dump;
2734 cb->done = control->done;
2735 cb->nlh = nlh;
2736 cb->data = control->data;
2737 cb->module = control->module;
2738 cb->min_dump_alloc = control->min_dump_alloc;
2739 cb->skb = skb;
2740
2741 nlk->cb_running = true;
2742
af65bdfc 2743 mutex_unlock(nlk->cb_mutex);
1da177e4 2744
b44d211e 2745 ret = netlink_dump(sk);
1da177e4 2746 sock_put(sk);
5c58298c 2747
b44d211e
AV
2748 if (ret)
2749 return ret;
2750
2751 /* We successfully started a dump, by returning -EINTR we
2752 * signal not to send ACK even if it was requested.
2753 */
2754 return -EINTR;
2755
2756error_unlock:
2757 sock_put(sk);
2758 mutex_unlock(nlk->cb_mutex);
2759error_free:
2760 kfree_skb(skb);
2761 return ret;
1da177e4 2762}
6dc878a8 2763EXPORT_SYMBOL(__netlink_dump_start);
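/* Example (editor's sketch, hypothetical names): request handlers normally
 * go through the netlink_dump_start() wrapper, which fills in .module and
 * calls the function above; the -EINTR return means "dump started, do not
 * send an ack".
 */
#if 0
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;	/* > 0 would mean "call me again for more data" */
}

static int my_handle_getall(struct sock *my_nlsk, struct sk_buff *skb,
			    struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.dump = my_dump,
	};

	/* my_nlsk is the kernel socket the request arrived on */
	return netlink_dump_start(my_nlsk, skb, nlh, &c);
}
#endif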
2764
2765void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2766{
2767 struct sk_buff *skb;
2768 struct nlmsghdr *rep;
2769 struct nlmsgerr *errmsg;
339bf98f 2770 size_t payload = sizeof(*errmsg);
1da177e4 2771
 2772 /* error messages get the original request appended */
2773 if (err)
2774 payload += nlmsg_len(nlh);
1da177e4 2775
2776 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2777 NETLINK_CB(in_skb).portid, GFP_KERNEL);
2778 if (!skb) {
2779 struct sock *sk;
2780
3b1e0a65 2781 sk = netlink_lookup(sock_net(in_skb->sk),
b4b51029 2782 in_skb->sk->sk_protocol,
15e47304 2783 NETLINK_CB(in_skb).portid);
2784 if (sk) {
2785 sk->sk_err = ENOBUFS;
2786 sk->sk_error_report(sk);
2787 sock_put(sk);
2788 }
2789 return;
2790 }
2791
15e47304 2792 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
5dba93ae 2793 NLMSG_ERROR, payload, 0);
bf8b79e4 2794 errmsg = nlmsg_data(rep);
1da177e4 2795 errmsg->error = err;
bf8b79e4 2796 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
15e47304 2797 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1da177e4 2798}
6ac552fd 2799EXPORT_SYMBOL(netlink_ack);
1da177e4 2800
cd40b7d3 2801int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1d00a4eb 2802 struct nlmsghdr *))
82ace47a 2803{
2804 struct nlmsghdr *nlh;
2805 int err;
2806
2807 while (skb->len >= nlmsg_total_size(0)) {
2808 int msglen;
2809
b529ccf2 2810 nlh = nlmsg_hdr(skb);
d35b6856 2811 err = 0;
82ace47a 2812
ad8e4b75 2813 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
82ace47a
TG
2814 return 0;
2815
2816 /* Only requests are handled by the kernel */
2817 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
5c58298c 2818 goto ack;
2819
2820 /* Skip control messages */
2821 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
5c58298c 2822 goto ack;
d35b6856 2823
1d00a4eb 2824 err = cb(skb, nlh);
2825 if (err == -EINTR)
2826 goto skip;
2827
2828ack:
d35b6856 2829 if (nlh->nlmsg_flags & NLM_F_ACK || err)
82ace47a 2830 netlink_ack(skb, nlh, err);
82ace47a 2831
5c58298c 2832skip:
6ac552fd 2833 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2834 if (msglen > skb->len)
2835 msglen = skb->len;
2836 skb_pull(skb, msglen);
2837 }
2838
2839 return 0;
2840}
6ac552fd 2841EXPORT_SYMBOL(netlink_rcv_skb);
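/* Example (editor's sketch, hypothetical names): a kernel socket's input
 * callback usually just lets netlink_rcv_skb() iterate and ack messages.
 */
#if 0
static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	switch (nlh->nlmsg_type) {
	case MY_MSG_TYPE:
		return 0;	/* success; acked if NLM_F_ACK was set */
	default:
		return -EINVAL;	/* reported back via netlink_ack() */
	}
}

static void my_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, my_rcv_msg);
}
#endif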
82ace47a 2842
2843/**
2844 * nlmsg_notify - send a notification netlink message
2845 * @sk: netlink socket to use
2846 * @skb: notification message
15e47304 2847 * @portid: destination netlink portid for reports or 0
2848 * @group: destination multicast group or 0
2849 * @report: 1 to report back, 0 to disable
2850 * @flags: allocation flags
2851 */
15e47304 2852int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2853 unsigned int group, int report, gfp_t flags)
2854{
2855 int err = 0;
2856
2857 if (group) {
15e47304 2858 int exclude_portid = 0;
2859
2860 if (report) {
2861 atomic_inc(&skb->users);
15e47304 2862 exclude_portid = portid;
2863 }
2864
2865 /* errors reported via destination sk->sk_err, but propagate
2866 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
15e47304 2867 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2868 }
2869
2870 if (report) {
2871 int err2;
2872
15e47304 2873 err2 = nlmsg_unicast(sk, skb, portid);
2874 if (!err || err == -ESRCH)
2875 err = err2;
2876 }
2877
2878 return err;
2879}
6ac552fd 2880EXPORT_SYMBOL(nlmsg_notify);
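/* Example (editor's sketch, hypothetical names): notifiers usually pass the
 * requester's portid and whether NLM_F_ECHO was set, so the requester also
 * gets a unicast copy of the notification.
 */
#if 0
static int my_send_notification(struct sock *my_nlsk, struct sk_buff *skb,
				u32 req_portid, bool echo)
{
	return nlmsg_notify(my_nlsk, skb, req_portid, MY_GRP_EVENT,
			    echo, GFP_KERNEL);
}
#endif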
d387f6ad 2881
2882#ifdef CONFIG_PROC_FS
2883struct nl_seq_iter {
e372c414 2884 struct seq_net_private p;
2885 int link;
2886 int hash_idx;
2887};
2888
2889static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2890{
2891 struct nl_seq_iter *iter = seq->private;
2892 int i, j;
e341694e 2893 struct netlink_sock *nlk;
1da177e4 2894 struct sock *s;
2895 loff_t off = 0;
2896
6ac552fd 2897 for (i = 0; i < MAX_LINKS; i++) {
e341694e 2898 struct rhashtable *ht = &nl_table[i].hash;
67a24ac1 2899 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
2900
2901 for (j = 0; j < tbl->size; j++) {
2902 struct rhash_head *node;
2903
2904 rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
e341694e 2905 s = (struct sock *)nlk;
1da177e4 2906
1218854a 2907 if (sock_net(s) != seq_file_net(seq))
b4b51029 2908 continue;
2909 if (off == pos) {
2910 iter->link = i;
2911 iter->hash_idx = j;
2912 return s;
2913 }
2914 ++off;
2915 }
2916 }
2917 }
2918 return NULL;
2919}
2920
2921static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
21e4902a 2922 __acquires(RCU)
1da177e4 2923{
e341694e 2924 rcu_read_lock();
2925 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2926}
2927
2928static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2929{
78fd1d0a 2930 struct rhashtable *ht;
2931 const struct bucket_table *tbl;
2932 struct rhash_head *node;
e341694e 2933 struct netlink_sock *nlk;
1da177e4 2934 struct nl_seq_iter *iter;
da12c90e 2935 struct net *net;
2936 int i, j;
2937
2938 ++*pos;
2939
2940 if (v == SEQ_START_TOKEN)
2941 return netlink_seq_socket_idx(seq, 0);
746fac4d 2942
da12c90e 2943 net = seq_file_net(seq);
b4b51029 2944 iter = seq->private;
2945 nlk = v;
2946
2947 i = iter->link;
2948 ht = &nl_table[i].hash;
2949 tbl = rht_dereference_rcu(ht->tbl, ht);
2950 rht_for_each_entry_rcu_continue(nlk, node, nlk->node.next, tbl, iter->hash_idx, node)
2951 if (net_eq(sock_net((struct sock *)nlk), net))
2952 return nlk;
1da177e4 2953
2954 j = iter->hash_idx + 1;
2955
2956 do {
da12c90e 2957
e341694e 2958 for (; j < tbl->size; j++) {
88d6ed15 2959 rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
2960 if (net_eq(sock_net((struct sock *)nlk), net)) {
2961 iter->link = i;
2962 iter->hash_idx = j;
2963 return nlk;
2964 }
2965 }
2966 }
2967
2968 j = 0;
2969 } while (++i < MAX_LINKS);
2970
2971 return NULL;
2972}
2973
2974static void netlink_seq_stop(struct seq_file *seq, void *v)
21e4902a 2975 __releases(RCU)
1da177e4 2976{
e341694e 2977 rcu_read_unlock();
2978}
2979
2980
2981static int netlink_seq_show(struct seq_file *seq, void *v)
2982{
658cb354 2983 if (v == SEQ_START_TOKEN) {
1da177e4
LT
2984 seq_puts(seq,
2985 "sk Eth Pid Groups "
cf0aa4e0 2986 "Rmem Wmem Dump Locks Drops Inode\n");
658cb354 2987 } else {
2988 struct sock *s = v;
2989 struct netlink_sock *nlk = nlk_sk(s);
2990
16b304f3 2991 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
2992 s,
2993 s->sk_protocol,
15e47304 2994 nlk->portid,
513c2500 2995 nlk->groups ? (u32)nlk->groups[0] : 0,
2996 sk_rmem_alloc_get(s),
2997 sk_wmem_alloc_get(s),
16b304f3 2998 nlk->cb_running,
38938bfe 2999 atomic_read(&s->sk_refcnt),
3000 atomic_read(&s->sk_drops),
3001 sock_i_ino(s)
3002 );
3003
3004 }
3005 return 0;
3006}
3007
56b3d975 3008static const struct seq_operations netlink_seq_ops = {
3009 .start = netlink_seq_start,
3010 .next = netlink_seq_next,
3011 .stop = netlink_seq_stop,
3012 .show = netlink_seq_show,
3013};
3014
3015
3016static int netlink_seq_open(struct inode *inode, struct file *file)
3017{
3018 return seq_open_net(inode, file, &netlink_seq_ops,
3019 sizeof(struct nl_seq_iter));
3020}
3021
da7071d7 3022static const struct file_operations netlink_seq_fops = {
3023 .owner = THIS_MODULE,
3024 .open = netlink_seq_open,
3025 .read = seq_read,
3026 .llseek = seq_lseek,
e372c414 3027 .release = seq_release_net,
3028};
3029
3030#endif
3031
3032int netlink_register_notifier(struct notifier_block *nb)
3033{
e041c683 3034 return atomic_notifier_chain_register(&netlink_chain, nb);
1da177e4 3035}
6ac552fd 3036EXPORT_SYMBOL(netlink_register_notifier);
3037
3038int netlink_unregister_notifier(struct notifier_block *nb)
3039{
e041c683 3040 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1da177e4 3041}
6ac552fd 3042EXPORT_SYMBOL(netlink_unregister_notifier);
746fac4d 3043
90ddc4f0 3044static const struct proto_ops netlink_ops = {
3045 .family = PF_NETLINK,
3046 .owner = THIS_MODULE,
3047 .release = netlink_release,
3048 .bind = netlink_bind,
3049 .connect = netlink_connect,
3050 .socketpair = sock_no_socketpair,
3051 .accept = sock_no_accept,
3052 .getname = netlink_getname,
9652e931 3053 .poll = netlink_poll,
1da177e4
LT
3054 .ioctl = sock_no_ioctl,
3055 .listen = sock_no_listen,
3056 .shutdown = sock_no_shutdown,
3057 .setsockopt = netlink_setsockopt,
3058 .getsockopt = netlink_getsockopt,
3059 .sendmsg = netlink_sendmsg,
3060 .recvmsg = netlink_recvmsg,
ccdfcc39 3061 .mmap = netlink_mmap,
3062 .sendpage = sock_no_sendpage,
3063};
3064
ec1b4cf7 3065static const struct net_proto_family netlink_family_ops = {
3066 .family = PF_NETLINK,
3067 .create = netlink_create,
3068 .owner = THIS_MODULE, /* for consistency 8) */
3069};
3070
4665079c 3071static int __net_init netlink_net_init(struct net *net)
3072{
3073#ifdef CONFIG_PROC_FS
d4beaa66 3074 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3075 return -ENOMEM;
3076#endif
3077 return 0;
3078}
3079
4665079c 3080static void __net_exit netlink_net_exit(struct net *net)
3081{
3082#ifdef CONFIG_PROC_FS
ece31ffd 3083 remove_proc_entry("netlink", net->proc_net);
3084#endif
3085}
3086
3087static void __init netlink_add_usersock_entry(void)
3088{
5c398dc8 3089 struct listeners *listeners;
3090 int groups = 32;
3091
5c398dc8 3092 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
b963ea89 3093 if (!listeners)
5c398dc8 3094 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3095
3096 netlink_table_grab();
3097
3098 nl_table[NETLINK_USERSOCK].groups = groups;
5c398dc8 3099 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3100 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3101 nl_table[NETLINK_USERSOCK].registered = 1;
9785e10a 3102 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3103
3104 netlink_table_ungrab();
3105}
3106
022cbae6 3107static struct pernet_operations __net_initdata netlink_net_ops = {
3108 .init = netlink_net_init,
3109 .exit = netlink_net_exit,
3110};
3111
3112static int __init netlink_proto_init(void)
3113{
1da177e4 3114 int i;
1da177e4 3115 int err = proto_register(&netlink_proto, 0);
3116 struct rhashtable_params ht_params = {
3117 .head_offset = offsetof(struct netlink_sock, node),
3118 .key_offset = offsetof(struct netlink_sock, portid),
3119 .key_len = sizeof(u32), /* portid */
7f19fc5e 3120 .hashfn = jhash,
3121 .max_shift = 16, /* 64K */
3122 .grow_decision = rht_grow_above_75,
3123 .shrink_decision = rht_shrink_below_30,
e341694e 3124 };
3125
3126 if (err != 0)
3127 goto out;
3128
fab25745 3129 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
1da177e4 3130
0da974f4 3131 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3132 if (!nl_table)
3133 goto panic;
1da177e4 3134
1da177e4 3135 for (i = 0; i < MAX_LINKS; i++) {
3136 if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
3137 while (--i > 0)
3138 rhashtable_destroy(&nl_table[i].hash);
1da177e4 3139 kfree(nl_table);
fab2caf6 3140 goto panic;
1da177e4 3141 }
3142 }
3143
3144 INIT_LIST_HEAD(&netlink_tap_all);
3145
3146 netlink_add_usersock_entry();
3147
1da177e4 3148 sock_register(&netlink_family_ops);
b4b51029 3149 register_pernet_subsys(&netlink_net_ops);
746fac4d 3150 /* The netlink device handler may be needed early. */
3151 rtnetlink_init();
3152out:
3153 return err;
3154panic:
3155 panic("netlink_init: Cannot allocate nl_table\n");
3156}
3157
1da177e4 3158core_initcall(netlink_proto_init);