netlink, mmap: don't walk rx ring on poll if receive queue non-empty
net/netlink/af_netlink.c
/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET		0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10
#define NETLINK_F_CAP_ACK		0x20

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

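/* Illustrative sketch (not part of this file): most of the NETLINK_F_*
 * flags above mirror per-socket options that userspace toggles with
 * setsockopt() at the SOL_NETLINK level; for example NETLINK_NO_ENOBUFS
 * sets NETLINK_F_RECV_NO_ENOBUFS and suppresses ENOBUFS overrun errors.
 * A minimal sketch, assuming an ordinary AF_NETLINK socket; error
 * handling omitted.
 */
#if 0
#include <sys/socket.h>
#include <linux/netlink.h>

static int netlink_enable_no_enobufs(int fd)
{
	int on = 1;

	/* Maps to NETLINK_F_RECV_NO_ENOBUFS on the kernel side. */
	return setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS,
			  &on, sizeof(on));
}
#endif
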
struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired * either during or after the socket has been removed from
 * the list and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

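/* Illustrative sketch (not part of this file): a module that wants to
 * observe netlink traffic registers a tap bound to an ARPHRD_NETLINK
 * net_device, much like the nlmon driver. "my_tap" and "my_tap_attach"
 * are hypothetical names; error handling is abbreviated.
 */
#if 0
static struct netlink_tap my_tap;

static int my_tap_attach(struct net_device *nlmon_dev)
{
	my_tap.dev    = nlmon_dev;	/* must be of type ARPHRD_NETLINK */
	my_tap.module = THIS_MODULE;

	return netlink_add_tap(&my_tap);	/* takes a module reference */
}

static void my_tap_detach(void)
{
	netlink_remove_tap(&my_tap);	/* waits for readers via synchronize_net() */
}
#endif
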
static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

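/* Illustrative sketch (not part of this file): the nm_block_size/nm_frame_nr
 * constraints checked in netlink_set_ring() correspond to the nl_mmap_req a
 * process passes via setsockopt(NETLINK_RX_RING) before mmap()ing the ring.
 * A minimal RX-ring setup, assuming the mmap'ed netlink API of this kernel
 * generation (CONFIG_NETLINK_MMAP); error handling omitted.
 */
#if 0
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static void *setup_rx_ring(int fd, struct nl_mmap_req *req)
{
	/* One page-aligned 16 KiB block; frame_nr must equal
	 * block_nr * (block_size / frame_size). */
	req->nm_block_size = 16384;
	req->nm_block_nr   = 1;
	req->nm_frame_size = 2048;
	req->nm_frame_nr   = req->nm_block_size / req->nm_frame_size;

	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, req, sizeof(*req));

	return mmap(NULL, req->nm_block_size * req->nm_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif
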
static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

597static void netlink_increment_head(struct netlink_ring *ring)
598{
599 ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
600}
601
602static void netlink_forward_ring(struct netlink_ring *ring)
603{
7084a315 604 unsigned int head = ring->head;
9652e931
PM
605 const struct nl_mmap_hdr *hdr;
606
607 do {
7084a315 608 hdr = __netlink_lookup_frame(ring, ring->head);
9652e931
PM
609 if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
610 break;
611 if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
612 break;
613 netlink_increment_head(ring);
614 } while (ring->head != head);
615}
616
0ef70770
KM
617static bool netlink_has_valid_frame(struct netlink_ring *ring)
618{
619 unsigned int head = ring->head, pos = head;
620 const struct nl_mmap_hdr *hdr;
621
622 do {
623 hdr = __netlink_lookup_frame(ring, pos);
624 if (hdr->nm_status == NL_MMAP_STATUS_VALID)
625 return true;
626 pos = pos != 0 ? pos - 1 : ring->frame_max;
627 } while (pos != head);
628
629 return false;
630}
631
cd1df525
PM
632static bool netlink_dump_space(struct netlink_sock *nlk)
633{
634 struct netlink_ring *ring = &nlk->rx_ring;
635 struct nl_mmap_hdr *hdr;
636 unsigned int n;
637
638 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
639 if (hdr == NULL)
640 return false;
641
642 n = ring->head + ring->frame_max / 2;
643 if (n > ring->frame_max)
644 n -= ring->frame_max;
645
646 hdr = __netlink_lookup_frame(ring, n);
647
648 return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
649}
650
static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	/* We could already have received frames in the normal receive
	 * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
	 * so if mask contains pollin/etc already, there's no point
	 * walking the ring.
	 */
	if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (nlk->rx_ring.pg_vec) {
			if (netlink_has_valid_frame(&nlk->rx_ring))
				mask |= POLLIN | POLLRDNORM;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

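/* Illustrative sketch (not part of this file): the reader side that the
 * poll() logic above serves. Frames land either directly in the mmap'ed
 * ring (NL_MMAP_STATUS_VALID) or, when the kernel could not place them
 * there, in the regular receive queue with the ring slot marked
 * NL_MMAP_STATUS_COPY, to be fetched with recvmsg(). A sketch assuming the
 * ring from setup_rx_ring() above; process_msg() and rx_buf are
 * placeholders and error handling is omitted.
 */
#if 0
static void drain_rx_ring(int fd, void *ring, unsigned int frame_size,
			  unsigned int frame_nr)
{
	static unsigned int head;
	struct nl_mmap_hdr *hdr;

	for (;;) {
		hdr = ring + (head % frame_nr) * frame_size;

		if (hdr->nm_status == NL_MMAP_STATUS_VALID) {
			/* Message payload follows the frame header. */
			process_msg((void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
		} else if (hdr->nm_status == NL_MMAP_STATUS_COPY) {
			/* Too big for the ring: fall back to recvmsg(). */
			recv(fd, rx_buf, sizeof(rx_buf), MSG_DONTWAIT);
		} else {
			break;	/* NL_MMAP_STATUS_UNUSED: nothing pending */
		}

		/* Hand the frame back to the kernel and advance. */
		hdr->nm_status = NL_MMAP_STATUS_UNUSED;
		head++;
	}
}
#endif
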
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}
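
/* Illustrative sketch (not part of this file): the sender side consumed by
 * netlink_mmap_sendmsg() above. Userspace writes a request into the next
 * unused TX-ring frame, marks it NL_MMAP_STATUS_VALID and kicks the kernel
 * with an empty sendto(). Assumes a TX ring configured with NETLINK_TX_RING
 * analogously to the RX example earlier; "tx_ring_send" is a hypothetical
 * helper, ring wrap-around and error handling are elided.
 */
#if 0
static int tx_ring_send(int fd, void *ring, unsigned int frame_size,
			unsigned int head, const void *req, unsigned int len)
{
	struct nl_mmap_hdr *hdr = ring + head * frame_size;

	if (hdr->nm_status != NL_MMAP_STATUS_UNUSED)
		return -1;		/* ring full, try again later */

	memcpy((void *)hdr + NL_MMAP_HDRLEN, req, len);
	hdr->nm_len    = len;
	hdr->nm_status = NL_MMAP_STATUS_VALID;

	/* Tell the kernel to walk the TX ring. */
	return sendto(fd, NULL, 0, 0, NULL, 0);
}
#endif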

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */

cf0a018a
PM
857static void netlink_skb_destructor(struct sk_buff *skb)
858{
9652e931
PM
859#ifdef CONFIG_NETLINK_MMAP
860 struct nl_mmap_hdr *hdr;
861 struct netlink_ring *ring;
862 struct sock *sk;
863
864 /* If a packet from the kernel to userspace was freed because of an
865 * error without being delivered to userspace, the kernel must reset
866 * the status. In the direction userspace to kernel, the status is
867 * always reset here after the packet was processed and freed.
868 */
869 if (netlink_skb_is_mmaped(skb)) {
870 hdr = netlink_mmap_hdr(skb);
871 sk = NETLINK_CB(skb).sk;
872
5fd96123
PM
873 if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
874 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
875 ring = &nlk_sk(sk)->tx_ring;
876 } else {
877 if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
878 hdr->nm_len = 0;
879 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
880 }
881 ring = &nlk_sk(sk)->rx_ring;
9652e931 882 }
9652e931
PM
883
884 WARN_ON(atomic_read(&ring->pending) == 0);
885 atomic_dec(&ring->pending);
886 sock_put(sk);
887
5e71d9d7 888 skb->head = NULL;
9652e931
PM
889 }
890#endif
c05cdb1b 891 if (is_vmalloc_addr(skb->head)) {
3a36515f
PN
892 if (!skb->cloned ||
893 !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
894 vfree(skb->head);
895
c05cdb1b
PNA
896 skb->head = NULL;
897 }
9652e931
PM
898 if (skb->sk != NULL)
899 sock_rfree(skb);
cf0a018a
PM
900}
901
902static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
903{
904 WARN_ON(skb->sk != NULL);
905 skb->sk = sk;
906 skb->destructor = netlink_skb_destructor;
907 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
908 sk_mem_charge(sk, skb->truesize);
909}
910
1da177e4
LT
911static void netlink_sock_destruct(struct sock *sk)
912{
3f660d66
HX
913 struct netlink_sock *nlk = nlk_sk(sk);
914
16b304f3
PS
915 if (nlk->cb_running) {
916 if (nlk->cb.done)
917 nlk->cb.done(&nlk->cb);
6dc878a8 918
16b304f3
PS
919 module_put(nlk->cb.module);
920 kfree_skb(nlk->cb.skb);
3f660d66
HX
921 }
922
1da177e4 923 skb_queue_purge(&sk->sk_receive_queue);
ccdfcc39
PM
924#ifdef CONFIG_NETLINK_MMAP
925 if (1) {
926 struct nl_mmap_req req;
927
928 memset(&req, 0, sizeof(req));
929 if (nlk->rx_ring.pg_vec)
0470eb99 930 __netlink_set_ring(sk, &req, false, NULL, 0);
ccdfcc39
PM
931 memset(&req, 0, sizeof(req));
932 if (nlk->tx_ring.pg_vec)
0470eb99 933 __netlink_set_ring(sk, &req, true, NULL, 0);
ccdfcc39
PM
934 }
935#endif /* CONFIG_NETLINK_MMAP */
1da177e4
LT
936
937 if (!sock_flag(sk, SOCK_DEAD)) {
6ac552fd 938 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
1da177e4
LT
939 return;
940 }
547b792c
IJ
941
942 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
943 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
944 WARN_ON(nlk_sk(sk)->groups);
1da177e4
LT
945}
946
6ac552fd
PM
947/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
948 * SMP. Look, when several writers sleep and reader wakes them up, all but one
1da177e4
LT
949 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
950 * this, _but_ remember, it adds useless work on UP machines.
951 */
952
d136f1bd 953void netlink_table_grab(void)
9a429c49 954 __acquires(nl_table_lock)
1da177e4 955{
d136f1bd
JB
956 might_sleep();
957
6abd219c 958 write_lock_irq(&nl_table_lock);
1da177e4
LT
959
960 if (atomic_read(&nl_table_users)) {
961 DECLARE_WAITQUEUE(wait, current);
962
963 add_wait_queue_exclusive(&nl_table_wait, &wait);
6ac552fd 964 for (;;) {
1da177e4
LT
965 set_current_state(TASK_UNINTERRUPTIBLE);
966 if (atomic_read(&nl_table_users) == 0)
967 break;
6abd219c 968 write_unlock_irq(&nl_table_lock);
1da177e4 969 schedule();
6abd219c 970 write_lock_irq(&nl_table_lock);
1da177e4
LT
971 }
972
973 __set_current_state(TASK_RUNNING);
974 remove_wait_queue(&nl_table_wait, &wait);
975 }
976}
977
d136f1bd 978void netlink_table_ungrab(void)
9a429c49 979 __releases(nl_table_lock)
1da177e4 980{
6abd219c 981 write_unlock_irq(&nl_table_lock);
1da177e4
LT
982 wake_up(&nl_table_wait);
983}
984
6ac552fd 985static inline void
1da177e4
LT
986netlink_lock_table(void)
987{
988 /* read_lock() synchronizes us to netlink_table_grab */
989
990 read_lock(&nl_table_lock);
991 atomic_inc(&nl_table_users);
992 read_unlock(&nl_table_lock);
993}
994
6ac552fd 995static inline void
1da177e4
LT
996netlink_unlock_table(void)
997{
998 if (atomic_dec_and_test(&nl_table_users))
999 wake_up(&nl_table_wait);
1000}
1001
e341694e 1002struct netlink_compare_arg
1da177e4 1003{
c428ecd1 1004 possible_net_t pnet;
e341694e
TG
1005 u32 portid;
1006};
1da177e4 1007
8f2ddaac
HX
1008/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
1009#define netlink_compare_arg_len \
1010 (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
c428ecd1
HX
1011
1012static inline int netlink_compare(struct rhashtable_compare_arg *arg,
1013 const void *ptr)
1da177e4 1014{
c428ecd1
HX
1015 const struct netlink_compare_arg *x = arg->key;
1016 const struct netlink_sock *nlk = ptr;
1da177e4 1017
c428ecd1
HX
1018 return nlk->portid != x->portid ||
1019 !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
1020}
1021
1022static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
1023 struct net *net, u32 portid)
1024{
1025 memset(arg, 0, sizeof(*arg));
1026 write_pnet(&arg->pnet, net);
1027 arg->portid = portid;
1da177e4
LT
1028}
1029
e341694e
TG
1030static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
1031 struct net *net)
1da177e4 1032{
c428ecd1 1033 struct netlink_compare_arg arg;
1da177e4 1034
c428ecd1
HX
1035 netlink_compare_arg_init(&arg, net, portid);
1036 return rhashtable_lookup_fast(&table->hash, &arg,
1037 netlink_rhashtable_params);
1da177e4
LT
1038}
1039
c428ecd1 1040static int __netlink_insert(struct netlink_table *table, struct sock *sk)
c5adde94 1041{
c428ecd1 1042 struct netlink_compare_arg arg;
c5adde94 1043
c428ecd1
HX
1044 netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
1045 return rhashtable_lookup_insert_key(&table->hash, &arg,
1046 &nlk_sk(sk)->node,
1047 netlink_rhashtable_params);
c5adde94
YX
1048}
1049
e341694e 1050static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
1da177e4 1051{
e341694e
TG
1052 struct netlink_table *table = &nl_table[protocol];
1053 struct sock *sk;
1da177e4 1054
e341694e
TG
1055 rcu_read_lock();
1056 sk = __netlink_lookup(table, portid, net);
1057 if (sk)
1058 sock_hold(sk);
1059 rcu_read_unlock();
1da177e4 1060
e341694e 1061 return sk;
1da177e4
LT
1062}
1063
90ddc4f0 1064static const struct proto_ops netlink_ops;
1da177e4 1065
4277a083
PM
1066static void
1067netlink_update_listeners(struct sock *sk)
1068{
1069 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
4277a083
PM
1070 unsigned long mask;
1071 unsigned int i;
6d772ac5
ED
1072 struct listeners *listeners;
1073
1074 listeners = nl_deref_protected(tbl->listeners);
1075 if (!listeners)
1076 return;
4277a083 1077
b4ff4f04 1078 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
4277a083 1079 mask = 0;
b67bfe0d 1080 sk_for_each_bound(sk, &tbl->mc_list) {
b4ff4f04
JB
1081 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1082 mask |= nlk_sk(sk)->groups[i];
1083 }
6d772ac5 1084 listeners->masks[i] = mask;
4277a083
PM
1085 }
1086 /* this function is only called with the netlink table "grabbed", which
1087 * makes sure updates are visible before bind or setsockopt return. */
1088}
1089
8ea65f4a 1090static int netlink_insert(struct sock *sk, u32 portid)
1da177e4 1091{
da12c90e 1092 struct netlink_table *table = &nl_table[sk->sk_protocol];
919d9db9 1093 int err;
1da177e4 1094
c5adde94 1095 lock_sock(sk);
1da177e4
LT
1096
1097 err = -EBUSY;
15e47304 1098 if (nlk_sk(sk)->portid)
1da177e4
LT
1099 goto err;
1100
1101 err = -ENOMEM;
97defe1e
TG
1102 if (BITS_PER_LONG > 32 &&
1103 unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
1da177e4
LT
1104 goto err;
1105
15e47304 1106 nlk_sk(sk)->portid = portid;
e341694e 1107 sock_hold(sk);
919d9db9 1108
c428ecd1
HX
1109 err = __netlink_insert(table, sk);
1110 if (err) {
4e7c1330
DB
1111 /* In case the hashtable backend returns with -EBUSY
1112 * from here, it must not escape to the caller.
1113 */
1114 if (unlikely(err == -EBUSY))
1115 err = -EOVERFLOW;
c428ecd1
HX
1116 if (err == -EEXIST)
1117 err = -EADDRINUSE;
c0bb07df 1118 nlk_sk(sk)->portid = 0;
c5adde94 1119 sock_put(sk);
919d9db9
HX
1120 }
1121
1da177e4 1122err:
c5adde94 1123 release_sock(sk);
1da177e4
LT
1124 return err;
1125}
1126
1127static void netlink_remove(struct sock *sk)
1128{
e341694e
TG
1129 struct netlink_table *table;
1130
e341694e 1131 table = &nl_table[sk->sk_protocol];
c428ecd1
HX
1132 if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
1133 netlink_rhashtable_params)) {
e341694e
TG
1134 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
1135 __sock_put(sk);
1136 }
e341694e 1137
1da177e4 1138 netlink_table_grab();
b10dcb3b 1139 if (nlk_sk(sk)->subscriptions) {
1da177e4 1140 __sk_del_bind_node(sk);
b10dcb3b
JB
1141 netlink_update_listeners(sk);
1142 }
ee1c2442
JB
1143 if (sk->sk_protocol == NETLINK_GENERIC)
1144 atomic_inc(&genl_sk_destructing_cnt);
1da177e4
LT
1145 netlink_table_ungrab();
1146}
1147
1148static struct proto netlink_proto = {
1149 .name = "NETLINK",
1150 .owner = THIS_MODULE,
1151 .obj_size = sizeof(struct netlink_sock),
1152};
1153
1b8d7ae4 1154static int __netlink_create(struct net *net, struct socket *sock,
11aa9c28
EB
1155 struct mutex *cb_mutex, int protocol,
1156 int kern)
1da177e4
LT
1157{
1158 struct sock *sk;
1159 struct netlink_sock *nlk;
ab33a171
PM
1160
1161 sock->ops = &netlink_ops;
1162
11aa9c28 1163 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
ab33a171
PM
1164 if (!sk)
1165 return -ENOMEM;
1166
1167 sock_init_data(sock, sk);
1168
1169 nlk = nlk_sk(sk);
658cb354 1170 if (cb_mutex) {
ffa4d721 1171 nlk->cb_mutex = cb_mutex;
658cb354 1172 } else {
ffa4d721
PM
1173 nlk->cb_mutex = &nlk->cb_def_mutex;
1174 mutex_init(nlk->cb_mutex);
1175 }
ab33a171 1176 init_waitqueue_head(&nlk->wait);
ccdfcc39
PM
1177#ifdef CONFIG_NETLINK_MMAP
1178 mutex_init(&nlk->pg_vec_lock);
1179#endif
ab33a171
PM
1180
1181 sk->sk_destruct = netlink_sock_destruct;
1182 sk->sk_protocol = protocol;
1183 return 0;
1184}
1185
3f378b68
EP
1186static int netlink_create(struct net *net, struct socket *sock, int protocol,
1187 int kern)
ab33a171
PM
1188{
1189 struct module *module = NULL;
af65bdfc 1190 struct mutex *cb_mutex;
f7fa9b10 1191 struct netlink_sock *nlk;
023e2cfa
JB
1192 int (*bind)(struct net *net, int group);
1193 void (*unbind)(struct net *net, int group);
ab33a171 1194 int err = 0;
1da177e4
LT
1195
1196 sock->state = SS_UNCONNECTED;
1197
1198 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1199 return -ESOCKTNOSUPPORT;
1200
6ac552fd 1201 if (protocol < 0 || protocol >= MAX_LINKS)
1da177e4
LT
1202 return -EPROTONOSUPPORT;
1203
77247bbb 1204 netlink_lock_table();
95a5afca 1205#ifdef CONFIG_MODULES
ab33a171 1206 if (!nl_table[protocol].registered) {
77247bbb 1207 netlink_unlock_table();
4fdb3bb7 1208 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
77247bbb 1209 netlink_lock_table();
4fdb3bb7 1210 }
ab33a171
PM
1211#endif
1212 if (nl_table[protocol].registered &&
1213 try_module_get(nl_table[protocol].module))
1214 module = nl_table[protocol].module;
974c37e9
AD
1215 else
1216 err = -EPROTONOSUPPORT;
af65bdfc 1217 cb_mutex = nl_table[protocol].cb_mutex;
03292745 1218 bind = nl_table[protocol].bind;
4f520900 1219 unbind = nl_table[protocol].unbind;
77247bbb 1220 netlink_unlock_table();
4fdb3bb7 1221
974c37e9
AD
1222 if (err < 0)
1223 goto out;
1224
11aa9c28 1225 err = __netlink_create(net, sock, cb_mutex, protocol, kern);
6ac552fd 1226 if (err < 0)
f7fa9b10
PM
1227 goto out_module;
1228
6f756a8c 1229 local_bh_disable();
c1fd3b94 1230 sock_prot_inuse_add(net, &netlink_proto, 1);
6f756a8c
DM
1231 local_bh_enable();
1232
f7fa9b10 1233 nlk = nlk_sk(sock->sk);
f7fa9b10 1234 nlk->module = module;
03292745 1235 nlk->netlink_bind = bind;
4f520900 1236 nlk->netlink_unbind = unbind;
ab33a171
PM
1237out:
1238 return err;
1da177e4 1239
ab33a171
PM
1240out_module:
1241 module_put(module);
1242 goto out;
1da177e4
LT
1243}
1244
21e4902a
TG
1245static void deferred_put_nlk_sk(struct rcu_head *head)
1246{
1247 struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
1248
1249 sock_put(&nlk->sk);
1250}
1251
1da177e4
LT
1252static int netlink_release(struct socket *sock)
1253{
1254 struct sock *sk = sock->sk;
1255 struct netlink_sock *nlk;
1256
1257 if (!sk)
1258 return 0;
1259
1260 netlink_remove(sk);
ac57b3a9 1261 sock_orphan(sk);
1da177e4
LT
1262 nlk = nlk_sk(sk);
1263
3f660d66
HX
1264 /*
1265 * OK. Socket is unlinked, any packets that arrive now
1266 * will be purged.
1267 */
1da177e4 1268
ee1c2442
JB
1269 /* must not acquire netlink_table_lock in any way again before unbind
1270 * and notifying genetlink is done as otherwise it might deadlock
1271 */
1272 if (nlk->netlink_unbind) {
1273 int i;
1274
1275 for (i = 0; i < nlk->ngroups; i++)
1276 if (test_bit(i, nlk->groups))
1277 nlk->netlink_unbind(sock_net(sk), i + 1);
1278 }
1279 if (sk->sk_protocol == NETLINK_GENERIC &&
1280 atomic_dec_return(&genl_sk_destructing_cnt) == 0)
1281 wake_up(&genl_sk_destructing_waitq);
1282
1da177e4
LT
1283 sock->sk = NULL;
1284 wake_up_interruptible_all(&nlk->wait);
1285
1286 skb_queue_purge(&sk->sk_write_queue);
1287
15e47304 1288 if (nlk->portid) {
1da177e4 1289 struct netlink_notify n = {
3b1e0a65 1290 .net = sock_net(sk),
1da177e4 1291 .protocol = sk->sk_protocol,
15e47304 1292 .portid = nlk->portid,
1da177e4 1293 };
e041c683
AS
1294 atomic_notifier_call_chain(&netlink_chain,
1295 NETLINK_URELEASE, &n);
746fac4d 1296 }
4fdb3bb7 1297
5e7c001c 1298 module_put(nlk->module);
4fdb3bb7 1299
aed81560 1300 if (netlink_is_kernel(sk)) {
b10dcb3b 1301 netlink_table_grab();
869e58f8
DL
1302 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1303 if (--nl_table[sk->sk_protocol].registered == 0) {
6d772ac5
ED
1304 struct listeners *old;
1305
1306 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1307 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1308 kfree_rcu(old, rcu);
869e58f8 1309 nl_table[sk->sk_protocol].module = NULL;
9785e10a 1310 nl_table[sk->sk_protocol].bind = NULL;
4f520900 1311 nl_table[sk->sk_protocol].unbind = NULL;
9785e10a 1312 nl_table[sk->sk_protocol].flags = 0;
869e58f8
DL
1313 nl_table[sk->sk_protocol].registered = 0;
1314 }
b10dcb3b 1315 netlink_table_ungrab();
658cb354 1316 }
77247bbb 1317
f7fa9b10
PM
1318 kfree(nlk->groups);
1319 nlk->groups = NULL;
1320
3755810c 1321 local_bh_disable();
c1fd3b94 1322 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
3755810c 1323 local_bh_enable();
21e4902a 1324 call_rcu(&nlk->rcu, deferred_put_nlk_sk);
1da177e4
LT
1325 return 0;
1326}
1327
1328static int netlink_autobind(struct socket *sock)
1329{
1330 struct sock *sk = sock->sk;
3b1e0a65 1331 struct net *net = sock_net(sk);
da12c90e 1332 struct netlink_table *table = &nl_table[sk->sk_protocol];
15e47304 1333 s32 portid = task_tgid_vnr(current);
1da177e4 1334 int err;
b9fbe709
HX
1335 s32 rover = -4096;
1336 bool ok;
1da177e4
LT
1337
1338retry:
1339 cond_resched();
e341694e 1340 rcu_read_lock();
b9fbe709
HX
1341 ok = !__netlink_lookup(table, portid, net);
1342 rcu_read_unlock();
1343 if (!ok) {
e341694e 1344 /* Bind collision, search negative portid values. */
b9fbe709
HX
1345 if (rover == -4096)
1346 /* rover will be in range [S32_MIN, -4097] */
1347 rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
1348 else if (rover >= -4096)
e341694e 1349 rover = -4097;
b9fbe709 1350 portid = rover--;
e341694e 1351 goto retry;
1da177e4 1352 }
1da177e4 1353
8ea65f4a 1354 err = netlink_insert(sk, portid);
1da177e4
LT
1355 if (err == -EADDRINUSE)
1356 goto retry;
d470e3b4
DM
1357
1358 /* If 2 threads race to autobind, that is fine. */
1359 if (err == -EBUSY)
1360 err = 0;
1361
1362 return err;
1da177e4
LT
1363}
1364
aa4cf945
EB
1365/**
1366 * __netlink_ns_capable - General netlink message capability test
1367 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
1368 * @user_ns: The user namespace of the capability to use
1369 * @cap: The capability to use
1370 *
1371 * Test to see if the opener of the socket we received the message
1372 * from had when the netlink socket was created and the sender of the
1373 * message has has the capability @cap in the user namespace @user_ns.
1374 */
1375bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1376 struct user_namespace *user_ns, int cap)
1377{
2d7a85f4
EB
1378 return ((nsp->flags & NETLINK_SKB_DST) ||
1379 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
1380 ns_capable(user_ns, cap);
aa4cf945
EB
1381}
1382EXPORT_SYMBOL(__netlink_ns_capable);
1383
1384/**
1385 * netlink_ns_capable - General netlink message capability test
1386 * @skb: socket buffer holding a netlink command from userspace
1387 * @user_ns: The user namespace of the capability to use
1388 * @cap: The capability to use
1389 *
1390 * Test to see if the opener of the socket we received the message
1391 * from had when the netlink socket was created and the sender of the
1392 * message has has the capability @cap in the user namespace @user_ns.
1393 */
1394bool netlink_ns_capable(const struct sk_buff *skb,
1395 struct user_namespace *user_ns, int cap)
1396{
1397 return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
1398}
1399EXPORT_SYMBOL(netlink_ns_capable);
1400
1401/**
1402 * netlink_capable - Netlink global message capability test
1403 * @skb: socket buffer holding a netlink command from userspace
1404 * @cap: The capability to use
1405 *
1406 * Test to see if the opener of the socket we received the message
1407 * from had when the netlink socket was created and the sender of the
1408 * message has has the capability @cap in all user namespaces.
1409 */
1410bool netlink_capable(const struct sk_buff *skb, int cap)
1411{
1412 return netlink_ns_capable(skb, &init_user_ns, cap);
1413}
1414EXPORT_SYMBOL(netlink_capable);
1415
1416/**
1417 * netlink_net_capable - Netlink network namespace message capability test
1418 * @skb: socket buffer holding a netlink command from userspace
1419 * @cap: The capability to use
1420 *
1421 * Test to see if the opener of the socket we received the message
1422 * from had when the netlink socket was created and the sender of the
1423 * message has has the capability @cap over the network namespace of
1424 * the socket we received the message from.
1425 */
1426bool netlink_net_capable(const struct sk_buff *skb, int cap)
1427{
1428 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1429}
1430EXPORT_SYMBOL(netlink_net_capable);
1431
5187cd05 1432static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
746fac4d 1433{
9785e10a 1434 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
df008c91 1435 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
746fac4d 1436}
1da177e4 1437
f7fa9b10
PM
1438static void
1439netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1440{
1441 struct netlink_sock *nlk = nlk_sk(sk);
1442
1443 if (nlk->subscriptions && !subscriptions)
1444 __sk_del_bind_node(sk);
1445 else if (!nlk->subscriptions && subscriptions)
1446 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1447 nlk->subscriptions = subscriptions;
1448}
1449
b4ff4f04 1450static int netlink_realloc_groups(struct sock *sk)
513c2500
PM
1451{
1452 struct netlink_sock *nlk = nlk_sk(sk);
1453 unsigned int groups;
b4ff4f04 1454 unsigned long *new_groups;
513c2500
PM
1455 int err = 0;
1456
b4ff4f04
JB
1457 netlink_table_grab();
1458
513c2500 1459 groups = nl_table[sk->sk_protocol].groups;
b4ff4f04 1460 if (!nl_table[sk->sk_protocol].registered) {
513c2500 1461 err = -ENOENT;
b4ff4f04
JB
1462 goto out_unlock;
1463 }
513c2500 1464
b4ff4f04
JB
1465 if (nlk->ngroups >= groups)
1466 goto out_unlock;
513c2500 1467
b4ff4f04
JB
1468 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1469 if (new_groups == NULL) {
1470 err = -ENOMEM;
1471 goto out_unlock;
1472 }
6ac552fd 1473 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
b4ff4f04
JB
1474 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1475
1476 nlk->groups = new_groups;
513c2500 1477 nlk->ngroups = groups;
b4ff4f04
JB
1478 out_unlock:
1479 netlink_table_ungrab();
1480 return err;
513c2500
PM
1481}
1482
02c81ab9 1483static void netlink_undo_bind(int group, long unsigned int groups,
023e2cfa 1484 struct sock *sk)
4f520900 1485{
023e2cfa 1486 struct netlink_sock *nlk = nlk_sk(sk);
4f520900
RGB
1487 int undo;
1488
1489 if (!nlk->netlink_unbind)
1490 return;
1491
1492 for (undo = 0; undo < group; undo++)
6251edd9 1493 if (test_bit(undo, &groups))
8b7c36d8 1494 nlk->netlink_unbind(sock_net(sk), undo + 1);
4f520900
RGB
1495}
1496
6ac552fd
PM
1497static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1498 int addr_len)
1da177e4
LT
1499{
1500 struct sock *sk = sock->sk;
3b1e0a65 1501 struct net *net = sock_net(sk);
1da177e4
LT
1502 struct netlink_sock *nlk = nlk_sk(sk);
1503 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1504 int err;
4f520900 1505 long unsigned int groups = nladdr->nl_groups;
746fac4d 1506
4e4b5376
HFS
1507 if (addr_len < sizeof(struct sockaddr_nl))
1508 return -EINVAL;
1509
1da177e4
LT
1510 if (nladdr->nl_family != AF_NETLINK)
1511 return -EINVAL;
1512
1513 /* Only superuser is allowed to listen multicasts */
4f520900 1514 if (groups) {
5187cd05 1515 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
513c2500 1516 return -EPERM;
b4ff4f04
JB
1517 err = netlink_realloc_groups(sk);
1518 if (err)
1519 return err;
513c2500 1520 }
1da177e4 1521
4f520900 1522 if (nlk->portid)
15e47304 1523 if (nladdr->nl_pid != nlk->portid)
1da177e4 1524 return -EINVAL;
4f520900
RGB
1525
1526 if (nlk->netlink_bind && groups) {
1527 int group;
1528
1529 for (group = 0; group < nlk->ngroups; group++) {
1530 if (!test_bit(group, &groups))
1531 continue;
8b7c36d8 1532 err = nlk->netlink_bind(net, group + 1);
4f520900
RGB
1533 if (!err)
1534 continue;
023e2cfa 1535 netlink_undo_bind(group, groups, sk);
4f520900
RGB
1536 return err;
1537 }
1538 }
1539
1540 if (!nlk->portid) {
1da177e4 1541 err = nladdr->nl_pid ?
8ea65f4a 1542 netlink_insert(sk, nladdr->nl_pid) :
1da177e4 1543 netlink_autobind(sock);
4f520900 1544 if (err) {
023e2cfa 1545 netlink_undo_bind(nlk->ngroups, groups, sk);
1da177e4 1546 return err;
4f520900 1547 }
1da177e4
LT
1548 }
1549
4f520900 1550 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1da177e4
LT
1551 return 0;
1552
1553 netlink_table_grab();
f7fa9b10 1554 netlink_update_subscriptions(sk, nlk->subscriptions +
4f520900 1555 hweight32(groups) -
746fac4d 1556 hweight32(nlk->groups[0]));
4f520900 1557 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
4277a083 1558 netlink_update_listeners(sk);
1da177e4
LT
1559 netlink_table_ungrab();
1560
1561 return 0;
1562}
1563
1564static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1565 int alen, int flags)
1566{
1567 int err = 0;
1568 struct sock *sk = sock->sk;
1569 struct netlink_sock *nlk = nlk_sk(sk);
6ac552fd 1570 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1da177e4 1571
6503d961
CG
1572 if (alen < sizeof(addr->sa_family))
1573 return -EINVAL;
1574
1da177e4
LT
1575 if (addr->sa_family == AF_UNSPEC) {
1576 sk->sk_state = NETLINK_UNCONNECTED;
15e47304 1577 nlk->dst_portid = 0;
d629b836 1578 nlk->dst_group = 0;
1da177e4
LT
1579 return 0;
1580 }
1581 if (addr->sa_family != AF_NETLINK)
1582 return -EINVAL;
1583
46833a86 1584 if ((nladdr->nl_groups || nladdr->nl_pid) &&
5187cd05 1585 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1da177e4
LT
1586 return -EPERM;
1587
15e47304 1588 if (!nlk->portid)
1da177e4
LT
1589 err = netlink_autobind(sock);
1590
1591 if (err == 0) {
1592 sk->sk_state = NETLINK_CONNECTED;
15e47304 1593 nlk->dst_portid = nladdr->nl_pid;
d629b836 1594 nlk->dst_group = ffs(nladdr->nl_groups);
1da177e4
LT
1595 }
1596
1597 return err;
1598}
1599
6ac552fd
PM
1600static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1601 int *addr_len, int peer)
1da177e4
LT
1602{
1603 struct sock *sk = sock->sk;
1604 struct netlink_sock *nlk = nlk_sk(sk);
13cfa97b 1605 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
746fac4d 1606
1da177e4
LT
1607 nladdr->nl_family = AF_NETLINK;
1608 nladdr->nl_pad = 0;
1609 *addr_len = sizeof(*nladdr);
1610
1611 if (peer) {
15e47304 1612 nladdr->nl_pid = nlk->dst_portid;
d629b836 1613 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1da177e4 1614 } else {
15e47304 1615 nladdr->nl_pid = nlk->portid;
513c2500 1616 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1da177e4
LT
1617 }
1618 return 0;
1619}
1620
15e47304 1621static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1da177e4 1622{
1da177e4
LT
1623 struct sock *sock;
1624 struct netlink_sock *nlk;
1625
15e47304 1626 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1da177e4
LT
1627 if (!sock)
1628 return ERR_PTR(-ECONNREFUSED);
1629
1630 /* Don't bother queuing skb if kernel socket has no input function */
1631 nlk = nlk_sk(sock);
cd40b7d3 1632 if (sock->sk_state == NETLINK_CONNECTED &&
15e47304 1633 nlk->dst_portid != nlk_sk(ssk)->portid) {
1da177e4
LT
1634 sock_put(sock);
1635 return ERR_PTR(-ECONNREFUSED);
1636 }
1637 return sock;
1638}
1639
1640struct sock *netlink_getsockbyfilp(struct file *filp)
1641{
496ad9aa 1642 struct inode *inode = file_inode(filp);
1da177e4
LT
1643 struct sock *sock;
1644
1645 if (!S_ISSOCK(inode->i_mode))
1646 return ERR_PTR(-ENOTSOCK);
1647
1648 sock = SOCKET_I(inode)->sk;
1649 if (sock->sk_family != AF_NETLINK)
1650 return ERR_PTR(-EINVAL);
1651
1652 sock_hold(sock);
1653 return sock;
1654}
1655
3a36515f
PN
1656static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1657 int broadcast)
c05cdb1b
PNA
1658{
1659 struct sk_buff *skb;
1660 void *data;
1661
3a36515f 1662 if (size <= NLMSG_GOODSIZE || broadcast)
c05cdb1b
PNA
1663 return alloc_skb(size, GFP_KERNEL);
1664
3a36515f
PN
1665 size = SKB_DATA_ALIGN(size) +
1666 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
c05cdb1b
PNA
1667
1668 data = vmalloc(size);
1669 if (data == NULL)
3a36515f 1670 return NULL;
c05cdb1b 1671
2ea2f62c 1672 skb = __build_skb(data, size);
3a36515f
PN
1673 if (skb == NULL)
1674 vfree(data);
2ea2f62c 1675 else
3a36515f 1676 skb->destructor = netlink_skb_destructor;
c05cdb1b
PNA
1677
1678 return skb;
c05cdb1b
PNA
1679}
1680
1da177e4
LT
1681/*
1682 * Attach a skb to a netlink socket.
1683 * The caller must hold a reference to the destination socket. On error, the
1684 * reference is dropped. The skb is not send to the destination, just all
1685 * all error checks are performed and memory in the queue is reserved.
1686 * Return values:
1687 * < 0: error. skb freed, reference to sock dropped.
1688 * 0: continue
1689 * 1: repeat lookup - reference dropped while waiting for socket memory.
1690 */
9457afee 1691int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
c3d8d1e3 1692 long *timeo, struct sock *ssk)
1da177e4
LT
1693{
1694 struct netlink_sock *nlk;
1695
1696 nlk = nlk_sk(sk);
1697
5fd96123 1698 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
cc3a572f 1699 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
5fd96123 1700 !netlink_skb_is_mmaped(skb)) {
1da177e4 1701 DECLARE_WAITQUEUE(wait, current);
c3d8d1e3 1702 if (!*timeo) {
aed81560 1703 if (!ssk || netlink_is_kernel(ssk))
1da177e4
LT
1704 netlink_overrun(sk);
1705 sock_put(sk);
1706 kfree_skb(skb);
1707 return -EAGAIN;
1708 }
1709
1710 __set_current_state(TASK_INTERRUPTIBLE);
1711 add_wait_queue(&nlk->wait, &wait);
1712
1713 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
cc3a572f 1714 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1da177e4 1715 !sock_flag(sk, SOCK_DEAD))
c3d8d1e3 1716 *timeo = schedule_timeout(*timeo);
1da177e4
LT
1717
1718 __set_current_state(TASK_RUNNING);
1719 remove_wait_queue(&nlk->wait, &wait);
1720 sock_put(sk);
1721
1722 if (signal_pending(current)) {
1723 kfree_skb(skb);
c3d8d1e3 1724 return sock_intr_errno(*timeo);
1da177e4
LT
1725 }
1726 return 1;
1727 }
cf0a018a 1728 netlink_skb_set_owner_r(skb, sk);
1da177e4
LT
1729 return 0;
1730}
1731
4a7e7c2a 1732static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1da177e4 1733{
1da177e4
LT
1734 int len = skb->len;
1735
bcbde0d4
DB
1736 netlink_deliver_tap(skb);
1737
f9c22888
PM
1738#ifdef CONFIG_NETLINK_MMAP
1739 if (netlink_skb_is_mmaped(skb))
1740 netlink_queue_mmaped_skb(sk, skb);
1741 else if (netlink_rx_is_mmaped(sk))
1742 netlink_ring_set_copied(sk, skb);
1743 else
1744#endif /* CONFIG_NETLINK_MMAP */
1745 skb_queue_tail(&sk->sk_receive_queue, skb);
676d2369 1746 sk->sk_data_ready(sk);
4a7e7c2a
ED
1747 return len;
1748}
1749
1750int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1751{
1752 int len = __netlink_sendskb(sk, skb);
1753
1da177e4
LT
1754 sock_put(sk);
1755 return len;
1756}
1757
1758void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1759{
1760 kfree_skb(skb);
1761 sock_put(sk);
1762}
1763
b57ef81f 1764static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1da177e4
LT
1765{
1766 int delta;
1767
1298ca46 1768 WARN_ON(skb->sk != NULL);
5fd96123
PM
1769 if (netlink_skb_is_mmaped(skb))
1770 return skb;
1da177e4 1771
4305b541 1772 delta = skb->end - skb->tail;
c05cdb1b 1773 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1da177e4
LT
1774 return skb;
1775
1776 if (skb_shared(skb)) {
1777 struct sk_buff *nskb = skb_clone(skb, allocation);
1778 if (!nskb)
1779 return skb;
8460c00f 1780 consume_skb(skb);
1da177e4
LT
1781 skb = nskb;
1782 }
1783
1784 if (!pskb_expand_head(skb, 0, -delta, allocation))
1785 skb->truesize -= delta;
1786
1787 return skb;
1788}
1789
3fbc2905
EB
1790static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1791 struct sock *ssk)
cd40b7d3
DL
1792{
1793 int ret;
1794 struct netlink_sock *nlk = nlk_sk(sk);
1795
1796 ret = -ECONNREFUSED;
1797 if (nlk->netlink_rcv != NULL) {
1798 ret = skb->len;
cf0a018a 1799 netlink_skb_set_owner_r(skb, sk);
e32123e5 1800 NETLINK_CB(skb).sk = ssk;
73bfd370 1801 netlink_deliver_tap_kernel(sk, ssk, skb);
cd40b7d3 1802 nlk->netlink_rcv(skb);
bfb253c9
ED
1803 consume_skb(skb);
1804 } else {
1805 kfree_skb(skb);
cd40b7d3 1806 }
cd40b7d3
DL
1807 sock_put(sk);
1808 return ret;
1809}
1810
1811int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
15e47304 1812 u32 portid, int nonblock)
1da177e4
LT
1813{
1814 struct sock *sk;
1815 int err;
1816 long timeo;
1817
1818 skb = netlink_trim(skb, gfp_any());
1819
1820 timeo = sock_sndtimeo(ssk, nonblock);
1821retry:
15e47304 1822 sk = netlink_getsockbyportid(ssk, portid);
1da177e4
LT
1823 if (IS_ERR(sk)) {
1824 kfree_skb(skb);
1825 return PTR_ERR(sk);
1826 }
cd40b7d3 1827 if (netlink_is_kernel(sk))
3fbc2905 1828 return netlink_unicast_kernel(sk, skb, ssk);
cd40b7d3 1829
b1153f29 1830 if (sk_filter(sk, skb)) {
84874607 1831 err = skb->len;
b1153f29
SH
1832 kfree_skb(skb);
1833 sock_put(sk);
1834 return err;
1835 }
1836
9457afee 1837 err = netlink_attachskb(sk, skb, &timeo, ssk);
1da177e4
LT
1838 if (err == 1)
1839 goto retry;
1840 if (err)
1841 return err;
1842
7ee015e0 1843 return netlink_sendskb(sk, skb);
1da177e4 1844}
6ac552fd 1845EXPORT_SYMBOL(netlink_unicast);
1da177e4 1846
f9c22888
PM
1847struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1848 u32 dst_portid, gfp_t gfp_mask)
1849{
1850#ifdef CONFIG_NETLINK_MMAP
1851 struct sock *sk = NULL;
1852 struct sk_buff *skb;
1853 struct netlink_ring *ring;
1854 struct nl_mmap_hdr *hdr;
1855 unsigned int maxlen;
1856
1857 sk = netlink_getsockbyportid(ssk, dst_portid);
1858 if (IS_ERR(sk))
1859 goto out;
1860
1861 ring = &nlk_sk(sk)->rx_ring;
1862 /* fast-path without atomic ops for common case: non-mmaped receiver */
1863 if (ring->pg_vec == NULL)
1864 goto out_put;
1865
aae9f0e2
TG
1866 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1867 goto out_put;
1868
f9c22888
PM
1869 skb = alloc_skb_head(gfp_mask);
1870 if (skb == NULL)
1871 goto err1;
1872
1873 spin_lock_bh(&sk->sk_receive_queue.lock);
1874 /* check again under lock */
1875 if (ring->pg_vec == NULL)
1876 goto out_free;
1877
aae9f0e2 1878 /* check again under lock */
f9c22888
PM
1879 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1880 if (maxlen < size)
1881 goto out_free;
1882
1883 netlink_forward_ring(ring);
1884 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1885 if (hdr == NULL)
1886 goto err2;
1887 netlink_ring_setup_skb(skb, sk, ring, hdr);
1888 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1889 atomic_inc(&ring->pending);
1890 netlink_increment_head(ring);
1891
1892 spin_unlock_bh(&sk->sk_receive_queue.lock);
1893 return skb;
1894
1895err2:
1896 kfree_skb(skb);
1897 spin_unlock_bh(&sk->sk_receive_queue.lock);
cd1df525 1898 netlink_overrun(sk);
f9c22888
PM
1899err1:
1900 sock_put(sk);
1901 return NULL;
1902
1903out_free:
1904 kfree_skb(skb);
1905 spin_unlock_bh(&sk->sk_receive_queue.lock);
1906out_put:
1907 sock_put(sk);
1908out:
1909#endif
1910 return alloc_skb(size, gfp_mask);
1911}
1912EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1913
4277a083
PM
1914int netlink_has_listeners(struct sock *sk, unsigned int group)
1915{
1916 int res = 0;
5c398dc8 1917 struct listeners *listeners;
4277a083 1918
aed81560 1919 BUG_ON(!netlink_is_kernel(sk));
b4ff4f04
JB
1920
1921 rcu_read_lock();
1922 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1923
6d772ac5 1924 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
5c398dc8 1925 res = test_bit(group - 1, listeners->masks);
b4ff4f04
JB
1926
1927 rcu_read_unlock();
1928
4277a083
PM
1929 return res;
1930}
1931EXPORT_SYMBOL_GPL(netlink_has_listeners);
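/*
 * Illustrative sketch, not part of af_netlink.c: netlink_has_listeners()
 * lets a notifier skip building a message when nobody has subscribed to
 * the multicast group. MY_GROUP and the kernel socket "nl_sk" are
 * assumptions of the example.
 */
#define MY_GROUP	1	/* hypothetical multicast group */

static void example_maybe_notify(struct sock *nl_sk)
{
	if (!netlink_has_listeners(nl_sk, MY_GROUP))
		return;		/* no subscribers, avoid the allocation */

	/* ... build the skb and hand it to netlink_broadcast() ... */
}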
1932
b57ef81f 1933static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1da177e4
LT
1934{
1935 struct netlink_sock *nlk = nlk_sk(sk);
1936
1937 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
cc3a572f 1938 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
cf0a018a 1939 netlink_skb_set_owner_r(skb, sk);
4a7e7c2a 1940 __netlink_sendskb(sk, skb);
2c645800 1941 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1da177e4
LT
1942 }
1943 return -1;
1944}
1945
1946struct netlink_broadcast_data {
1947 struct sock *exclude_sk;
b4b51029 1948 struct net *net;
15e47304 1949 u32 portid;
1da177e4
LT
1950 u32 group;
1951 int failure;
ff491a73 1952 int delivery_failure;
1da177e4
LT
1953 int congested;
1954 int delivered;
7d877f3b 1955 gfp_t allocation;
1da177e4 1956 struct sk_buff *skb, *skb2;
910a7e90
EB
1957 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1958 void *tx_data;
1da177e4
LT
1959};
1960
46c9521f
RR
1961static void do_one_broadcast(struct sock *sk,
1962 struct netlink_broadcast_data *p)
1da177e4
LT
1963{
1964 struct netlink_sock *nlk = nlk_sk(sk);
1965 int val;
1966
1967 if (p->exclude_sk == sk)
46c9521f 1968 return;
1da177e4 1969
15e47304 1970 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 1971 !test_bit(p->group - 1, nlk->groups))
46c9521f 1972 return;
1da177e4 1973
59324cf3
ND
1974 if (!net_eq(sock_net(sk), p->net)) {
1975 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
1976 return;
1977
1978 if (!peernet_has_id(sock_net(sk), p->net))
1979 return;
1980
1981 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
1982 CAP_NET_BROADCAST))
1983 return;
1984 }
b4b51029 1985
1da177e4
LT
1986 if (p->failure) {
1987 netlink_overrun(sk);
46c9521f 1988 return;
1da177e4
LT
1989 }
1990
1991 sock_hold(sk);
1992 if (p->skb2 == NULL) {
68acc024 1993 if (skb_shared(p->skb)) {
1da177e4
LT
1994 p->skb2 = skb_clone(p->skb, p->allocation);
1995 } else {
68acc024
TC
1996 p->skb2 = skb_get(p->skb);
1997 /*
1998 * skb ownership may have been set when
1999 * delivered to a previous socket.
2000 */
2001 skb_orphan(p->skb2);
1da177e4
LT
2002 }
2003 }
2004 if (p->skb2 == NULL) {
2005 netlink_overrun(sk);
2006 /* Clone failed. Notify ALL listeners. */
2007 p->failure = 1;
cc3a572f 2008 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
be0c22a4 2009 p->delivery_failure = 1;
59324cf3
ND
2010 goto out;
2011 }
2012 if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
910a7e90
EB
2013 kfree_skb(p->skb2);
2014 p->skb2 = NULL;
59324cf3
ND
2015 goto out;
2016 }
2017 if (sk_filter(sk, p->skb2)) {
b1153f29
SH
2018 kfree_skb(p->skb2);
2019 p->skb2 = NULL;
59324cf3
ND
2020 goto out;
2021 }
2022 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
2023 NETLINK_CB(p->skb2).nsid_is_set = true;
2024 val = netlink_broadcast_deliver(sk, p->skb2);
2025 if (val < 0) {
1da177e4 2026 netlink_overrun(sk);
cc3a572f 2027 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
be0c22a4 2028 p->delivery_failure = 1;
1da177e4
LT
2029 } else {
2030 p->congested |= val;
2031 p->delivered = 1;
2032 p->skb2 = NULL;
2033 }
59324cf3 2034out:
1da177e4 2035 sock_put(sk);
1da177e4
LT
2036}
2037
15e47304 2038int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
2039 u32 group, gfp_t allocation,
2040 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2041 void *filter_data)
1da177e4 2042{
3b1e0a65 2043 struct net *net = sock_net(ssk);
1da177e4 2044 struct netlink_broadcast_data info;
1da177e4
LT
2045 struct sock *sk;
2046
2047 skb = netlink_trim(skb, allocation);
2048
2049 info.exclude_sk = ssk;
b4b51029 2050 info.net = net;
15e47304 2051 info.portid = portid;
1da177e4
LT
2052 info.group = group;
2053 info.failure = 0;
ff491a73 2054 info.delivery_failure = 0;
1da177e4
LT
2055 info.congested = 0;
2056 info.delivered = 0;
2057 info.allocation = allocation;
2058 info.skb = skb;
2059 info.skb2 = NULL;
910a7e90
EB
2060 info.tx_filter = filter;
2061 info.tx_data = filter_data;
1da177e4
LT
2062
 2063 /* While we sleep in clone, do not allow the socket list to change */
2064
2065 netlink_lock_table();
2066
b67bfe0d 2067 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1da177e4
LT
2068 do_one_broadcast(sk, &info);
2069
70d4bf6d 2070 consume_skb(skb);
aa1c6a6f 2071
1da177e4
LT
2072 netlink_unlock_table();
2073
70d4bf6d
NH
2074 if (info.delivery_failure) {
2075 kfree_skb(info.skb2);
ff491a73 2076 return -ENOBUFS;
658cb354
ED
2077 }
2078 consume_skb(info.skb2);
ff491a73 2079
1da177e4
LT
2080 if (info.delivered) {
2081 if (info.congested && (allocation & __GFP_WAIT))
2082 yield();
2083 return 0;
2084 }
1da177e4
LT
2085 return -ESRCH;
2086}
910a7e90
EB
2087EXPORT_SYMBOL(netlink_broadcast_filtered);
2088
15e47304 2089int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
2090 u32 group, gfp_t allocation)
2091{
15e47304 2092 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
910a7e90
EB
2093 NULL, NULL);
2094}
6ac552fd 2095EXPORT_SYMBOL(netlink_broadcast);
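/*
 * Illustrative sketch, not part of af_netlink.c: delivering one skb to
 * every member of a multicast group. netlink_broadcast() consumes the
 * skb; -ESRCH only means that no socket was listening.
 */
static int example_notify_group(struct sock *nl_sk, struct sk_buff *skb,
				unsigned int group)
{
	int err = netlink_broadcast(nl_sk, skb, 0, group, GFP_KERNEL);

	return err == -ESRCH ? 0 : err;
}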
1da177e4
LT
2096
2097struct netlink_set_err_data {
2098 struct sock *exclude_sk;
15e47304 2099 u32 portid;
1da177e4
LT
2100 u32 group;
2101 int code;
2102};
2103
b57ef81f 2104static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1da177e4
LT
2105{
2106 struct netlink_sock *nlk = nlk_sk(sk);
1a50307b 2107 int ret = 0;
1da177e4
LT
2108
2109 if (sk == p->exclude_sk)
2110 goto out;
2111
09ad9bc7 2112 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
b4b51029
EB
2113 goto out;
2114
15e47304 2115 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 2116 !test_bit(p->group - 1, nlk->groups))
1da177e4
LT
2117 goto out;
2118
cc3a572f 2119 if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
1a50307b
PNA
2120 ret = 1;
2121 goto out;
2122 }
2123
1da177e4
LT
2124 sk->sk_err = p->code;
2125 sk->sk_error_report(sk);
2126out:
1a50307b 2127 return ret;
1da177e4
LT
2128}
2129
4843b93c
PNA
2130/**
2131 * netlink_set_err - report error to broadcast listeners
2132 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
15e47304 2133 * @portid: the PORTID of a process that we want to skip (if any)
840e93f2 2134 * @group: the broadcast group that will notice the error
4843b93c 2135 * @code: error code, must be negative (as usual in kernelspace)
1a50307b
PNA
2136 *
2137 * This function returns the number of broadcast listeners that have set the
cc3a572f 2138 * NETLINK_NO_ENOBUFS socket option.
4843b93c 2139 */
15e47304 2140int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1da177e4
LT
2141{
2142 struct netlink_set_err_data info;
1da177e4 2143 struct sock *sk;
1a50307b 2144 int ret = 0;
1da177e4
LT
2145
2146 info.exclude_sk = ssk;
15e47304 2147 info.portid = portid;
1da177e4 2148 info.group = group;
4843b93c
PNA
2149 /* sk->sk_err wants a positive error value */
2150 info.code = -code;
1da177e4
LT
2151
2152 read_lock(&nl_table_lock);
2153
b67bfe0d 2154 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1a50307b 2155 ret += do_one_set_err(sk, &info);
1da177e4
LT
2156
2157 read_unlock(&nl_table_lock);
1a50307b 2158 return ret;
1da177e4 2159}
dd5b6ce6 2160EXPORT_SYMBOL(netlink_set_err);
1da177e4 2161
84659eb5
JB
2162/* must be called with netlink table grabbed */
2163static void netlink_update_socket_mc(struct netlink_sock *nlk,
2164 unsigned int group,
2165 int is_new)
2166{
2167 int old, new = !!is_new, subscriptions;
2168
2169 old = test_bit(group - 1, nlk->groups);
2170 subscriptions = nlk->subscriptions - old + new;
2171 if (new)
2172 __set_bit(group - 1, nlk->groups);
2173 else
2174 __clear_bit(group - 1, nlk->groups);
2175 netlink_update_subscriptions(&nlk->sk, subscriptions);
2176 netlink_update_listeners(&nlk->sk);
2177}
2178
9a4595bc 2179static int netlink_setsockopt(struct socket *sock, int level, int optname,
b7058842 2180 char __user *optval, unsigned int optlen)
9a4595bc
PM
2181{
2182 struct sock *sk = sock->sk;
2183 struct netlink_sock *nlk = nlk_sk(sk);
eb496534
JB
2184 unsigned int val = 0;
2185 int err;
9a4595bc
PM
2186
2187 if (level != SOL_NETLINK)
2188 return -ENOPROTOOPT;
2189
ccdfcc39
PM
2190 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2191 optlen >= sizeof(int) &&
eb496534 2192 get_user(val, (unsigned int __user *)optval))
9a4595bc
PM
2193 return -EFAULT;
2194
2195 switch (optname) {
2196 case NETLINK_PKTINFO:
2197 if (val)
cc3a572f 2198 nlk->flags |= NETLINK_F_RECV_PKTINFO;
9a4595bc 2199 else
cc3a572f 2200 nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
9a4595bc
PM
2201 err = 0;
2202 break;
2203 case NETLINK_ADD_MEMBERSHIP:
2204 case NETLINK_DROP_MEMBERSHIP: {
5187cd05 2205 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
9a4595bc 2206 return -EPERM;
b4ff4f04
JB
2207 err = netlink_realloc_groups(sk);
2208 if (err)
2209 return err;
9a4595bc
PM
2210 if (!val || val - 1 >= nlk->ngroups)
2211 return -EINVAL;
7774d5e0 2212 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
023e2cfa 2213 err = nlk->netlink_bind(sock_net(sk), val);
4f520900
RGB
2214 if (err)
2215 return err;
2216 }
9a4595bc 2217 netlink_table_grab();
84659eb5
JB
2218 netlink_update_socket_mc(nlk, val,
2219 optname == NETLINK_ADD_MEMBERSHIP);
9a4595bc 2220 netlink_table_ungrab();
7774d5e0 2221 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
023e2cfa 2222 nlk->netlink_unbind(sock_net(sk), val);
03292745 2223
9a4595bc
PM
2224 err = 0;
2225 break;
2226 }
be0c22a4
PNA
2227 case NETLINK_BROADCAST_ERROR:
2228 if (val)
cc3a572f 2229 nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
be0c22a4 2230 else
cc3a572f 2231 nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
be0c22a4
PNA
2232 err = 0;
2233 break;
38938bfe
PNA
2234 case NETLINK_NO_ENOBUFS:
2235 if (val) {
cc3a572f
ND
2236 nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
2237 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
38938bfe 2238 wake_up_interruptible(&nlk->wait);
658cb354 2239 } else {
cc3a572f 2240 nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
658cb354 2241 }
38938bfe
PNA
2242 err = 0;
2243 break;
ccdfcc39
PM
2244#ifdef CONFIG_NETLINK_MMAP
2245 case NETLINK_RX_RING:
2246 case NETLINK_TX_RING: {
2247 struct nl_mmap_req req;
2248
2249 /* Rings might consume more memory than queue limits, require
2250 * CAP_NET_ADMIN.
2251 */
2252 if (!capable(CAP_NET_ADMIN))
2253 return -EPERM;
2254 if (optlen < sizeof(req))
2255 return -EINVAL;
2256 if (copy_from_user(&req, optval, sizeof(req)))
2257 return -EFAULT;
0470eb99 2258 err = netlink_set_ring(sk, &req,
ccdfcc39
PM
2259 optname == NETLINK_TX_RING);
2260 break;
2261 }
2262#endif /* CONFIG_NETLINK_MMAP */
59324cf3
ND
2263 case NETLINK_LISTEN_ALL_NSID:
2264 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
2265 return -EPERM;
2266
2267 if (val)
2268 nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
2269 else
2270 nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
2271 err = 0;
2272 break;
0a6a3a23
CR
2273 case NETLINK_CAP_ACK:
2274 if (val)
2275 nlk->flags |= NETLINK_F_CAP_ACK;
2276 else
2277 nlk->flags &= ~NETLINK_F_CAP_ACK;
2278 err = 0;
2279 break;
9a4595bc
PM
2280 default:
2281 err = -ENOPROTOOPT;
2282 }
2283 return err;
2284}
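/*
 * Illustrative userspace sketch, not part of af_netlink.c: joining
 * multicast group 1 through the NETLINK_ADD_MEMBERSHIP option handled
 * above. The file descriptor is assumed to be a bound netlink socket.
 */
#include <sys/socket.h>
#include <linux/netlink.h>

int example_join_group(int fd)
{
	unsigned int group = 1;

	return setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			  &group, sizeof(group));
}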
2285
2286static int netlink_getsockopt(struct socket *sock, int level, int optname,
746fac4d 2287 char __user *optval, int __user *optlen)
9a4595bc
PM
2288{
2289 struct sock *sk = sock->sk;
2290 struct netlink_sock *nlk = nlk_sk(sk);
2291 int len, val, err;
2292
2293 if (level != SOL_NETLINK)
2294 return -ENOPROTOOPT;
2295
2296 if (get_user(len, optlen))
2297 return -EFAULT;
2298 if (len < 0)
2299 return -EINVAL;
2300
2301 switch (optname) {
2302 case NETLINK_PKTINFO:
2303 if (len < sizeof(int))
2304 return -EINVAL;
2305 len = sizeof(int);
cc3a572f 2306 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
a27b58fe
HC
2307 if (put_user(len, optlen) ||
2308 put_user(val, optval))
2309 return -EFAULT;
9a4595bc
PM
2310 err = 0;
2311 break;
be0c22a4
PNA
2312 case NETLINK_BROADCAST_ERROR:
2313 if (len < sizeof(int))
2314 return -EINVAL;
2315 len = sizeof(int);
cc3a572f 2316 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
be0c22a4
PNA
2317 if (put_user(len, optlen) ||
2318 put_user(val, optval))
2319 return -EFAULT;
2320 err = 0;
2321 break;
38938bfe
PNA
2322 case NETLINK_NO_ENOBUFS:
2323 if (len < sizeof(int))
2324 return -EINVAL;
2325 len = sizeof(int);
cc3a572f 2326 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
38938bfe
PNA
2327 if (put_user(len, optlen) ||
2328 put_user(val, optval))
2329 return -EFAULT;
2330 err = 0;
2331 break;
b42be38b
DH
2332 case NETLINK_LIST_MEMBERSHIPS: {
2333 int pos, idx, shift;
2334
2335 err = 0;
2336 netlink_table_grab();
2337 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
2338 if (len - pos < sizeof(u32))
2339 break;
2340
2341 idx = pos / sizeof(unsigned long);
2342 shift = (pos % sizeof(unsigned long)) * 8;
2343 if (put_user((u32)(nlk->groups[idx] >> shift),
2344 (u32 __user *)(optval + pos))) {
2345 err = -EFAULT;
2346 break;
2347 }
2348 }
2349 if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
2350 err = -EFAULT;
2351 netlink_table_ungrab();
2352 break;
2353 }
0a6a3a23
CR
2354 case NETLINK_CAP_ACK:
2355 if (len < sizeof(int))
2356 return -EINVAL;
2357 len = sizeof(int);
2358 val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
2359 if (put_user(len, optlen) ||
2360 put_user(val, optval))
2361 return -EFAULT;
2362 err = 0;
2363 break;
9a4595bc
PM
2364 default:
2365 err = -ENOPROTOOPT;
2366 }
2367 return err;
2368}
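/*
 * Illustrative userspace sketch, not part of af_netlink.c, based on the
 * NETLINK_LIST_MEMBERSHIPS handling above: the kernel copies as much of
 * the group bitmap as fits into optval and reports the full size through
 * optlen, so a zero-length first call can be used to size the buffer.
 */
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int example_list_memberships(int fd)
{
	socklen_t len = 0;
	unsigned int *groups;

	if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &len))
		return -1;

	groups = malloc(len);
	if (!groups)
		return -1;

	if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, groups, &len)) {
		free(groups);
		return -1;
	}

	/* groups[] now holds one bit per multicast group (group 1 = bit 0) */
	free(groups);
	return 0;
}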
2369
2370static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2371{
2372 struct nl_pktinfo info;
2373
2374 info.group = NETLINK_CB(skb).dst_group;
2375 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2376}
2377
59324cf3
ND
2378static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
2379 struct sk_buff *skb)
2380{
2381 if (!NETLINK_CB(skb).nsid_is_set)
2382 return;
2383
2384 put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
2385 &NETLINK_CB(skb).nsid);
2386}
2387
1b784140 2388static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1da177e4 2389{
1da177e4
LT
2390 struct sock *sk = sock->sk;
2391 struct netlink_sock *nlk = nlk_sk(sk);
342dfc30 2392 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
15e47304 2393 u32 dst_portid;
d629b836 2394 u32 dst_group;
1da177e4
LT
2395 struct sk_buff *skb;
2396 int err;
2397 struct scm_cookie scm;
2d7a85f4 2398 u32 netlink_skb_flags = 0;
1da177e4
LT
2399
2400 if (msg->msg_flags&MSG_OOB)
2401 return -EOPNOTSUPP;
2402
7cc05662 2403 err = scm_send(sock, msg, &scm, true);
1da177e4
LT
2404 if (err < 0)
2405 return err;
2406
2407 if (msg->msg_namelen) {
b47030c7 2408 err = -EINVAL;
1da177e4 2409 if (addr->nl_family != AF_NETLINK)
b47030c7 2410 goto out;
15e47304 2411 dst_portid = addr->nl_pid;
d629b836 2412 dst_group = ffs(addr->nl_groups);
b47030c7 2413 err = -EPERM;
15e47304 2414 if ((dst_group || dst_portid) &&
5187cd05 2415 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
b47030c7 2416 goto out;
2d7a85f4 2417 netlink_skb_flags |= NETLINK_SKB_DST;
1da177e4 2418 } else {
15e47304 2419 dst_portid = nlk->dst_portid;
d629b836 2420 dst_group = nlk->dst_group;
1da177e4
LT
2421 }
2422
15e47304 2423 if (!nlk->portid) {
1da177e4
LT
2424 err = netlink_autobind(sock);
2425 if (err)
2426 goto out;
2427 }
2428
a8866ff6
AV
2429 /* It's a really convoluted way for userland to ask for mmaped
2430 * sendmsg(), but that's what we've got...
2431 */
5fd96123 2432 if (netlink_tx_is_mmaped(sk) &&
c953e239 2433 iter_is_iovec(&msg->msg_iter) &&
a8866ff6 2434 msg->msg_iter.nr_segs == 1 &&
c0371da6 2435 msg->msg_iter.iov->iov_base == NULL) {
5fd96123 2436 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
7cc05662 2437 &scm);
5fd96123
PM
2438 goto out;
2439 }
2440
1da177e4
LT
2441 err = -EMSGSIZE;
2442 if (len > sk->sk_sndbuf - 32)
2443 goto out;
2444 err = -ENOBUFS;
3a36515f 2445 skb = netlink_alloc_large_skb(len, dst_group);
6ac552fd 2446 if (skb == NULL)
1da177e4
LT
2447 goto out;
2448
15e47304 2449 NETLINK_CB(skb).portid = nlk->portid;
d629b836 2450 NETLINK_CB(skb).dst_group = dst_group;
7cc05662 2451 NETLINK_CB(skb).creds = scm.creds;
2d7a85f4 2452 NETLINK_CB(skb).flags = netlink_skb_flags;
1da177e4 2453
1da177e4 2454 err = -EFAULT;
6ce8e9ce 2455 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1da177e4
LT
2456 kfree_skb(skb);
2457 goto out;
2458 }
2459
2460 err = security_netlink_send(sk, skb);
2461 if (err) {
2462 kfree_skb(skb);
2463 goto out;
2464 }
2465
d629b836 2466 if (dst_group) {
1da177e4 2467 atomic_inc(&skb->users);
15e47304 2468 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1da177e4 2469 }
15e47304 2470 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1da177e4
LT
2471
2472out:
7cc05662 2473 scm_destroy(&scm);
1da177e4
LT
2474 return err;
2475}
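/*
 * Illustrative userspace sketch, not part of af_netlink.c: the kind of
 * request that arrives at netlink_sendmsg() above, here an RTM_GETLINK
 * dump sent to the kernel (nl_pid 0) over NETLINK_ROUTE. Error handling
 * is omitted for brevity.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int example_send_dump_request(int fd)
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req;
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.g));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.g.rtgen_family = AF_UNSPEC;

	return sendto(fd, &req, req.nlh.nlmsg_len, 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}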
2476
1b784140 2477static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1da177e4
LT
2478 int flags)
2479{
1da177e4
LT
2480 struct scm_cookie scm;
2481 struct sock *sk = sock->sk;
2482 struct netlink_sock *nlk = nlk_sk(sk);
2483 int noblock = flags&MSG_DONTWAIT;
2484 size_t copied;
68d6ac6d 2485 struct sk_buff *skb, *data_skb;
b44d211e 2486 int err, ret;
1da177e4
LT
2487
2488 if (flags&MSG_OOB)
2489 return -EOPNOTSUPP;
2490
2491 copied = 0;
2492
6ac552fd
PM
2493 skb = skb_recv_datagram(sk, flags, noblock, &err);
2494 if (skb == NULL)
1da177e4
LT
2495 goto out;
2496
68d6ac6d
JB
2497 data_skb = skb;
2498
1dacc76d
JB
2499#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2500 if (unlikely(skb_shinfo(skb)->frag_list)) {
1dacc76d 2501 /*
68d6ac6d
JB
2502 * If this skb has a frag_list, then here that means that we
2503 * will have to use the frag_list skb's data for compat tasks
2504 * and the regular skb's data for normal (non-compat) tasks.
1dacc76d 2505 *
68d6ac6d
JB
2506 * If we need to send the compat skb, assign it to the
2507 * 'data_skb' variable so that it will be used below for data
2508 * copying. We keep 'skb' for everything else, including
2509 * freeing both later.
1dacc76d 2510 */
68d6ac6d
JB
2511 if (flags & MSG_CMSG_COMPAT)
2512 data_skb = skb_shinfo(skb)->frag_list;
1dacc76d
JB
2513 }
2514#endif
2515
9063e21f
ED
2516 /* Record the max length of recvmsg() calls for future allocations */
2517 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2518 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2519 16384);
2520
68d6ac6d 2521 copied = data_skb->len;
1da177e4
LT
2522 if (len < copied) {
2523 msg->msg_flags |= MSG_TRUNC;
2524 copied = len;
2525 }
2526
68d6ac6d 2527 skb_reset_transport_header(data_skb);
51f3d02b 2528 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1da177e4
LT
2529
2530 if (msg->msg_name) {
342dfc30 2531 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1da177e4
LT
2532 addr->nl_family = AF_NETLINK;
2533 addr->nl_pad = 0;
15e47304 2534 addr->nl_pid = NETLINK_CB(skb).portid;
d629b836 2535 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1da177e4
LT
2536 msg->msg_namelen = sizeof(*addr);
2537 }
2538
cc3a572f 2539 if (nlk->flags & NETLINK_F_RECV_PKTINFO)
cc9a06cd 2540 netlink_cmsg_recv_pktinfo(msg, skb);
59324cf3
ND
2541 if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
2542 netlink_cmsg_listen_all_nsid(sk, msg, skb);
cc9a06cd 2543
7cc05662
CH
2544 memset(&scm, 0, sizeof(scm));
2545 scm.creds = *NETLINK_CREDS(skb);
188ccb55 2546 if (flags & MSG_TRUNC)
68d6ac6d 2547 copied = data_skb->len;
daa3766e 2548
1da177e4
LT
2549 skb_free_datagram(sk, skb);
2550
16b304f3
PS
2551 if (nlk->cb_running &&
2552 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
b44d211e
AV
2553 ret = netlink_dump(sk);
2554 if (ret) {
ac30ef83 2555 sk->sk_err = -ret;
b44d211e
AV
2556 sk->sk_error_report(sk);
2557 }
2558 }
1da177e4 2559
7cc05662 2560 scm_recv(sock, msg, &scm, flags);
1da177e4
LT
2561out:
2562 netlink_rcv_wake(sk);
2563 return err ? : copied;
2564}
2565
676d2369 2566static void netlink_data_ready(struct sock *sk)
1da177e4 2567{
cd40b7d3 2568 BUG();
1da177e4
LT
2569}
2570
2571/*
746fac4d 2572 * We export these functions to other modules. They provide a
1da177e4
LT
2573 * complete set of kernel non-blocking support for message
2574 * queueing.
2575 */
2576
2577struct sock *
9f00d977
PNA
2578__netlink_kernel_create(struct net *net, int unit, struct module *module,
2579 struct netlink_kernel_cfg *cfg)
1da177e4
LT
2580{
2581 struct socket *sock;
2582 struct sock *sk;
77247bbb 2583 struct netlink_sock *nlk;
5c398dc8 2584 struct listeners *listeners = NULL;
a31f2d17
PNA
2585 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2586 unsigned int groups;
1da177e4 2587
fab2caf6 2588 BUG_ON(!nl_table);
1da177e4 2589
6ac552fd 2590 if (unit < 0 || unit >= MAX_LINKS)
1da177e4
LT
2591 return NULL;
2592
2593 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2594 return NULL;
13d3078e
EB
2595
2596 if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
23fe1866
PE
2597 goto out_sock_release_nosk;
2598
2599 sk = sock->sk;
4fdb3bb7 2600
a31f2d17 2601 if (!cfg || cfg->groups < 32)
4277a083 2602 groups = 32;
a31f2d17
PNA
2603 else
2604 groups = cfg->groups;
4277a083 2605
5c398dc8 2606 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
4277a083
PM
2607 if (!listeners)
2608 goto out_sock_release;
2609
1da177e4 2610 sk->sk_data_ready = netlink_data_ready;
a31f2d17
PNA
2611 if (cfg && cfg->input)
2612 nlk_sk(sk)->netlink_rcv = cfg->input;
1da177e4 2613
8ea65f4a 2614 if (netlink_insert(sk, 0))
77247bbb 2615 goto out_sock_release;
4fdb3bb7 2616
77247bbb 2617 nlk = nlk_sk(sk);
cc3a572f 2618 nlk->flags |= NETLINK_F_KERNEL_SOCKET;
4fdb3bb7 2619
4fdb3bb7 2620 netlink_table_grab();
b4b51029
EB
2621 if (!nl_table[unit].registered) {
2622 nl_table[unit].groups = groups;
5c398dc8 2623 rcu_assign_pointer(nl_table[unit].listeners, listeners);
b4b51029
EB
2624 nl_table[unit].cb_mutex = cb_mutex;
2625 nl_table[unit].module = module;
9785e10a
PNA
2626 if (cfg) {
2627 nl_table[unit].bind = cfg->bind;
6251edd9 2628 nl_table[unit].unbind = cfg->unbind;
9785e10a 2629 nl_table[unit].flags = cfg->flags;
da12c90e
G
2630 if (cfg->compare)
2631 nl_table[unit].compare = cfg->compare;
9785e10a 2632 }
b4b51029 2633 nl_table[unit].registered = 1;
f937f1f4
JJ
2634 } else {
2635 kfree(listeners);
869e58f8 2636 nl_table[unit].registered++;
b4b51029 2637 }
4fdb3bb7 2638 netlink_table_ungrab();
77247bbb
PM
2639 return sk;
2640
4fdb3bb7 2641out_sock_release:
4277a083 2642 kfree(listeners);
9dfbec1f 2643 netlink_kernel_release(sk);
23fe1866
PE
2644 return NULL;
2645
2646out_sock_release_nosk:
4fdb3bb7 2647 sock_release(sock);
77247bbb 2648 return NULL;
1da177e4 2649}
9f00d977 2650EXPORT_SYMBOL(__netlink_kernel_create);
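/*
 * Illustrative sketch, not part of af_netlink.c: kernel code normally
 * reaches __netlink_kernel_create() through the netlink_kernel_create()
 * wrapper, which supplies THIS_MODULE. MY_NETLINK_PROTO and my_input()
 * are assumptions of the example.
 */
#define MY_NETLINK_PROTO	31	/* hypothetical protocol number < MAX_LINKS */

static void my_input(struct sk_buff *skb)
{
	/* invoked via nlk->netlink_rcv() for every message sent to the socket */
}

static struct sock *example_create_kernel_socket(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= my_input,
	};

	return netlink_kernel_create(net, MY_NETLINK_PROTO, &cfg);
}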
b7c6ba6e
DL
2651
2652void
2653netlink_kernel_release(struct sock *sk)
2654{
13d3078e
EB
2655 if (sk == NULL || sk->sk_socket == NULL)
2656 return;
2657
2658 sock_release(sk->sk_socket);
b7c6ba6e
DL
2659}
2660EXPORT_SYMBOL(netlink_kernel_release);
2661
d136f1bd 2662int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
b4ff4f04 2663{
5c398dc8 2664 struct listeners *new, *old;
b4ff4f04 2665 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
b4ff4f04
JB
2666
2667 if (groups < 32)
2668 groups = 32;
2669
b4ff4f04 2670 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
5c398dc8
ED
2671 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2672 if (!new)
d136f1bd 2673 return -ENOMEM;
6d772ac5 2674 old = nl_deref_protected(tbl->listeners);
5c398dc8
ED
2675 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2676 rcu_assign_pointer(tbl->listeners, new);
2677
37b6b935 2678 kfree_rcu(old, rcu);
b4ff4f04
JB
2679 }
2680 tbl->groups = groups;
2681
d136f1bd
JB
2682 return 0;
2683}
2684
2685/**
2686 * netlink_change_ngroups - change number of multicast groups
2687 *
2688 * This changes the number of multicast groups that are available
2689 * on a certain netlink family. Note that it is not possible to
2690 * change the number of groups to below 32. Also note that it does
2691 * not implicitly call netlink_clear_multicast_users() when the
2692 * number of groups is reduced.
2693 *
2694 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2695 * @groups: The new number of groups.
2696 */
2697int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2698{
2699 int err;
2700
2701 netlink_table_grab();
2702 err = __netlink_change_ngroups(sk, groups);
b4ff4f04 2703 netlink_table_ungrab();
d136f1bd 2704
b4ff4f04
JB
2705 return err;
2706}
b4ff4f04 2707
b8273570
JB
2708void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2709{
2710 struct sock *sk;
b8273570
JB
2711 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2712
b67bfe0d 2713 sk_for_each_bound(sk, &tbl->mc_list)
b8273570
JB
2714 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2715}
2716
a46621a3 2717struct nlmsghdr *
15e47304 2718__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
a46621a3
DV
2719{
2720 struct nlmsghdr *nlh;
573ce260 2721 int size = nlmsg_msg_size(len);
a46621a3 2722
23b45672 2723 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
a46621a3
DV
2724 nlh->nlmsg_type = type;
2725 nlh->nlmsg_len = size;
2726 nlh->nlmsg_flags = flags;
15e47304 2727 nlh->nlmsg_pid = portid;
a46621a3
DV
2728 nlh->nlmsg_seq = seq;
2729 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
573ce260 2730 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
a46621a3
DV
2731 return nlh;
2732}
2733EXPORT_SYMBOL(__nlmsg_put);
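/*
 * Illustrative sketch, not part of af_netlink.c: __nlmsg_put() is usually
 * reached through the nlmsg_put() helper; a typical caller then appends
 * attributes and closes the message with nlmsg_end(). MY_MSG_TYPE and
 * MY_ATTR are assumptions of the example.
 */
#define MY_MSG_TYPE	0x10	/* hypothetical message type (>= NLMSG_MIN_TYPE) */
#define MY_ATTR		1	/* hypothetical attribute type */

static struct sk_buff *example_build_msg(u32 portid, u32 seq, u32 value)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(nla_total_size(sizeof(value)), GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 0, 0);
	if (!nlh)
		goto fail;
	if (nla_put_u32(skb, MY_ATTR, value))
		goto fail;
	nlmsg_end(skb, nlh);
	return skb;

fail:
	nlmsg_free(skb);
	return NULL;
}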
2734
1da177e4
LT
2735/*
2736 * It looks a bit ugly.
 2737 * It would be better to create a kernel thread.
2738 */
2739
2740static int netlink_dump(struct sock *sk)
2741{
2742 struct netlink_sock *nlk = nlk_sk(sk);
2743 struct netlink_callback *cb;
c7ac8679 2744 struct sk_buff *skb = NULL;
1da177e4 2745 struct nlmsghdr *nlh;
bf8b79e4 2746 int len, err = -ENOBUFS;
c7ac8679 2747 int alloc_size;
1da177e4 2748
af65bdfc 2749 mutex_lock(nlk->cb_mutex);
16b304f3 2750 if (!nlk->cb_running) {
bf8b79e4
TG
2751 err = -EINVAL;
2752 goto errout_skb;
1da177e4
LT
2753 }
2754
16b304f3 2755 cb = &nlk->cb;
c7ac8679
GR
2756 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2757
f9c22888
PM
2758 if (!netlink_rx_is_mmaped(sk) &&
2759 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2760 goto errout_skb;
9063e21f
ED
2761
2762 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2763 * required, but it makes sense to _attempt_ a 16K bytes allocation
2764 * to reduce number of system calls on dump operations, if user
2765 * ever provided a big enough buffer.
2766 */
2767 if (alloc_size < nlk->max_recvmsg_len) {
2768 skb = netlink_alloc_skb(sk,
2769 nlk->max_recvmsg_len,
2770 nlk->portid,
2771 GFP_KERNEL |
2772 __GFP_NOWARN |
2773 __GFP_NORETRY);
2774 /* available room should be exact amount to avoid MSG_TRUNC */
2775 if (skb)
2776 skb_reserve(skb, skb_tailroom(skb) -
2777 nlk->max_recvmsg_len);
2778 }
2779 if (!skb)
2780 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2781 GFP_KERNEL);
c7ac8679 2782 if (!skb)
c63d6ea3 2783 goto errout_skb;
f9c22888 2784 netlink_skb_set_owner_r(skb, sk);
c7ac8679 2785
1da177e4
LT
2786 len = cb->dump(skb, cb);
2787
2788 if (len > 0) {
af65bdfc 2789 mutex_unlock(nlk->cb_mutex);
b1153f29
SH
2790
2791 if (sk_filter(sk, skb))
2792 kfree_skb(skb);
4a7e7c2a
ED
2793 else
2794 __netlink_sendskb(sk, skb);
1da177e4
LT
2795 return 0;
2796 }
2797
bf8b79e4
TG
2798 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2799 if (!nlh)
2800 goto errout_skb;
2801
670dc283
JB
2802 nl_dump_check_consistent(cb, nlh);
2803
bf8b79e4
TG
2804 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2805
b1153f29
SH
2806 if (sk_filter(sk, skb))
2807 kfree_skb(skb);
4a7e7c2a
ED
2808 else
2809 __netlink_sendskb(sk, skb);
1da177e4 2810
a8f74b22
TG
2811 if (cb->done)
2812 cb->done(cb);
1da177e4 2813
16b304f3
PS
2814 nlk->cb_running = false;
2815 mutex_unlock(nlk->cb_mutex);
6dc878a8 2816 module_put(cb->module);
16b304f3 2817 consume_skb(cb->skb);
1da177e4 2818 return 0;
1797754e 2819
bf8b79e4 2820errout_skb:
af65bdfc 2821 mutex_unlock(nlk->cb_mutex);
bf8b79e4 2822 kfree_skb(skb);
bf8b79e4 2823 return err;
1da177e4
LT
2824}
2825
6dc878a8
G
2826int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2827 const struct nlmsghdr *nlh,
2828 struct netlink_dump_control *control)
1da177e4
LT
2829{
2830 struct netlink_callback *cb;
2831 struct sock *sk;
2832 struct netlink_sock *nlk;
b44d211e 2833 int ret;
1da177e4 2834
f9c22888
PM
2835 /* Memory mapped dump requests need to be copied to avoid looping
 2836 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2837 * a reference to the skb.
2838 */
2839 if (netlink_skb_is_mmaped(skb)) {
2840 skb = skb_copy(skb, GFP_KERNEL);
16b304f3 2841 if (skb == NULL)
f9c22888 2842 return -ENOBUFS;
f9c22888
PM
2843 } else
2844 atomic_inc(&skb->users);
2845
15e47304 2846 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1da177e4 2847 if (sk == NULL) {
16b304f3
PS
2848 ret = -ECONNREFUSED;
2849 goto error_free;
1da177e4 2850 }
6dc878a8 2851
16b304f3 2852 nlk = nlk_sk(sk);
af65bdfc 2853 mutex_lock(nlk->cb_mutex);
6dc878a8 2854 /* A dump is in progress... */
16b304f3 2855 if (nlk->cb_running) {
6dc878a8 2856 ret = -EBUSY;
16b304f3 2857 goto error_unlock;
1da177e4 2858 }
6dc878a8 2859 /* add reference of module which cb->dump belongs to */
16b304f3 2860 if (!try_module_get(control->module)) {
6dc878a8 2861 ret = -EPROTONOSUPPORT;
16b304f3 2862 goto error_unlock;
6dc878a8
G
2863 }
2864
16b304f3
PS
2865 cb = &nlk->cb;
2866 memset(cb, 0, sizeof(*cb));
2867 cb->dump = control->dump;
2868 cb->done = control->done;
2869 cb->nlh = nlh;
2870 cb->data = control->data;
2871 cb->module = control->module;
2872 cb->min_dump_alloc = control->min_dump_alloc;
2873 cb->skb = skb;
2874
2875 nlk->cb_running = true;
2876
af65bdfc 2877 mutex_unlock(nlk->cb_mutex);
1da177e4 2878
b44d211e 2879 ret = netlink_dump(sk);
1da177e4 2880 sock_put(sk);
5c58298c 2881
b44d211e
AV
2882 if (ret)
2883 return ret;
2884
5c58298c
DL
 2885 /* We successfully started a dump; by returning -EINTR we
2886 * signal not to send ACK even if it was requested.
2887 */
2888 return -EINTR;
16b304f3
PS
2889
2890error_unlock:
2891 sock_put(sk);
2892 mutex_unlock(nlk->cb_mutex);
2893error_free:
2894 kfree_skb(skb);
2895 return ret;
1da177e4 2896}
6dc878a8 2897EXPORT_SYMBOL(__netlink_dump_start);
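/*
 * Illustrative sketch, not part of af_netlink.c: request handlers normally
 * start a dump through the netlink_dump_start() wrapper, which fills in
 * control->module before calling __netlink_dump_start(). my_dump() and
 * my_done() are assumed callbacks.
 */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* fill skb with one batch; return a positive length while more
	 * records remain, 0 once the dump is complete
	 */
	return 0;
}

static int my_done(struct netlink_callback *cb)
{
	return 0;
}

static int example_handle_dump(struct sock *nl_sk, struct sk_buff *skb,
			       struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,
			.done = my_done,
		};

		/* -EINTR from netlink_dump_start() means "started, don't ACK" */
		return netlink_dump_start(nl_sk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}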
1da177e4
LT
2898
2899void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2900{
2901 struct sk_buff *skb;
2902 struct nlmsghdr *rep;
2903 struct nlmsgerr *errmsg;
339bf98f 2904 size_t payload = sizeof(*errmsg);
0a6a3a23 2905 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
1da177e4 2906
0a6a3a23
CR
 2907 /* Error messages get the original request appended, unless the user
2908 * requests to cap the error message.
2909 */
2910 if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
339bf98f 2911 payload += nlmsg_len(nlh);
1da177e4 2912
f9c22888
PM
2913 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2914 NETLINK_CB(in_skb).portid, GFP_KERNEL);
1da177e4
LT
2915 if (!skb) {
2916 struct sock *sk;
2917
3b1e0a65 2918 sk = netlink_lookup(sock_net(in_skb->sk),
b4b51029 2919 in_skb->sk->sk_protocol,
15e47304 2920 NETLINK_CB(in_skb).portid);
1da177e4
LT
2921 if (sk) {
2922 sk->sk_err = ENOBUFS;
2923 sk->sk_error_report(sk);
2924 sock_put(sk);
2925 }
2926 return;
2927 }
2928
15e47304 2929 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
5dba93ae 2930 NLMSG_ERROR, payload, 0);
bf8b79e4 2931 errmsg = nlmsg_data(rep);
1da177e4 2932 errmsg->error = err;
0a6a3a23 2933 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
15e47304 2934 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1da177e4 2935}
6ac552fd 2936EXPORT_SYMBOL(netlink_ack);
1da177e4 2937
cd40b7d3 2938int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1d00a4eb 2939 struct nlmsghdr *))
82ace47a 2940{
82ace47a
TG
2941 struct nlmsghdr *nlh;
2942 int err;
2943
2944 while (skb->len >= nlmsg_total_size(0)) {
cd40b7d3
DL
2945 int msglen;
2946
b529ccf2 2947 nlh = nlmsg_hdr(skb);
d35b6856 2948 err = 0;
82ace47a 2949
ad8e4b75 2950 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
82ace47a
TG
2951 return 0;
2952
d35b6856
TG
2953 /* Only requests are handled by the kernel */
2954 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
5c58298c 2955 goto ack;
45e7ae7f
TG
2956
2957 /* Skip control messages */
2958 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
5c58298c 2959 goto ack;
d35b6856 2960
1d00a4eb 2961 err = cb(skb, nlh);
5c58298c
DL
2962 if (err == -EINTR)
2963 goto skip;
2964
2965ack:
d35b6856 2966 if (nlh->nlmsg_flags & NLM_F_ACK || err)
82ace47a 2967 netlink_ack(skb, nlh, err);
82ace47a 2968
5c58298c 2969skip:
6ac552fd 2970 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
cd40b7d3
DL
2971 if (msglen > skb->len)
2972 msglen = skb->len;
2973 skb_pull(skb, msglen);
82ace47a
TG
2974 }
2975
2976 return 0;
2977}
6ac552fd 2978EXPORT_SYMBOL(netlink_rcv_skb);
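/*
 * Illustrative sketch, not part of af_netlink.c: the cfg->input callback
 * of a kernel socket commonly just hands the skb to netlink_rcv_skb(),
 * which walks the messages, calls the per-message handler and sends ACKs.
 * my_rcv_msg() is an assumed handler.
 */
static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* dispatch on nlh->nlmsg_type; return 0 or a negative error */
	return 0;
}

static void my_netlink_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_rcv_msg);
}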
82ace47a 2979
d387f6ad
TG
2980/**
2981 * nlmsg_notify - send a notification netlink message
2982 * @sk: netlink socket to use
2983 * @skb: notification message
15e47304 2984 * @portid: destination netlink portid for reports or 0
d387f6ad
TG
2985 * @group: destination multicast group or 0
2986 * @report: 1 to report back, 0 to disable
2987 * @flags: allocation flags
2988 */
15e47304 2989int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
d387f6ad
TG
2990 unsigned int group, int report, gfp_t flags)
2991{
2992 int err = 0;
2993
2994 if (group) {
15e47304 2995 int exclude_portid = 0;
d387f6ad
TG
2996
2997 if (report) {
2998 atomic_inc(&skb->users);
15e47304 2999 exclude_portid = portid;
d387f6ad
TG
3000 }
3001
1ce85fe4
PNA
3002 /* errors reported via destination sk->sk_err, but propagate
3003 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
15e47304 3004 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
d387f6ad
TG
3005 }
3006
1ce85fe4
PNA
3007 if (report) {
3008 int err2;
3009
15e47304 3010 err2 = nlmsg_unicast(sk, skb, portid);
1ce85fe4
PNA
3011 if (!err || err == -ESRCH)
3012 err = err2;
3013 }
d387f6ad
TG
3014
3015 return err;
3016}
6ac552fd 3017EXPORT_SYMBOL(nlmsg_notify);
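/*
 * Illustrative sketch, not part of af_netlink.c: a typical notifier sends
 * the message to a multicast group and, when the requester asked for an
 * echo (report != 0), unicasts it back as well. MY_GROUP (as in the
 * earlier sketch) is an assumption of the example.
 */
static int example_notify(struct sock *nl_sk, struct sk_buff *skb,
			  u32 requester_portid, int report)
{
	return nlmsg_notify(nl_sk, skb, requester_portid, MY_GROUP,
			    report, GFP_KERNEL);
}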
d387f6ad 3018
1da177e4
LT
3019#ifdef CONFIG_PROC_FS
3020struct nl_seq_iter {
e372c414 3021 struct seq_net_private p;
56d28b1e 3022 struct rhashtable_iter hti;
1da177e4 3023 int link;
1da177e4
LT
3024};
3025
56d28b1e 3026static int netlink_walk_start(struct nl_seq_iter *iter)
1da177e4 3027{
56d28b1e 3028 int err;
1da177e4 3029
56d28b1e
HX
3030 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
3031 if (err) {
3032 iter->link = MAX_LINKS;
3033 return err;
1da177e4 3034 }
56d28b1e
HX
3035
3036 err = rhashtable_walk_start(&iter->hti);
3037 return err == -EAGAIN ? 0 : err;
1da177e4
LT
3038}
3039
56d28b1e 3040static void netlink_walk_stop(struct nl_seq_iter *iter)
1da177e4 3041{
56d28b1e
HX
3042 rhashtable_walk_stop(&iter->hti);
3043 rhashtable_walk_exit(&iter->hti);
1da177e4
LT
3044}
3045
56d28b1e 3046static void *__netlink_seq_next(struct seq_file *seq)
1da177e4 3047{
56d28b1e 3048 struct nl_seq_iter *iter = seq->private;
e341694e 3049 struct netlink_sock *nlk;
1da177e4 3050
56d28b1e
HX
3051 do {
3052 for (;;) {
3053 int err;
1da177e4 3054
56d28b1e 3055 nlk = rhashtable_walk_next(&iter->hti);
746fac4d 3056
56d28b1e
HX
3057 if (IS_ERR(nlk)) {
3058 if (PTR_ERR(nlk) == -EAGAIN)
3059 continue;
e341694e 3060
56d28b1e
HX
3061 return nlk;
3062 }
1da177e4 3063
56d28b1e
HX
3064 if (nlk)
3065 break;
1da177e4 3066
56d28b1e
HX
3067 netlink_walk_stop(iter);
3068 if (++iter->link >= MAX_LINKS)
3069 return NULL;
da12c90e 3070
56d28b1e
HX
3071 err = netlink_walk_start(iter);
3072 if (err)
3073 return ERR_PTR(err);
1da177e4 3074 }
56d28b1e 3075 } while (sock_net(&nlk->sk) != seq_file_net(seq));
1da177e4 3076
56d28b1e
HX
3077 return nlk;
3078}
1da177e4 3079
56d28b1e
HX
3080static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3081{
3082 struct nl_seq_iter *iter = seq->private;
3083 void *obj = SEQ_START_TOKEN;
3084 loff_t pos;
3085 int err;
3086
3087 iter->link = 0;
3088
3089 err = netlink_walk_start(iter);
3090 if (err)
3091 return ERR_PTR(err);
3092
3093 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3094 obj = __netlink_seq_next(seq);
3095
3096 return obj;
3097}
3098
3099static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3100{
3101 ++*pos;
3102 return __netlink_seq_next(seq);
1da177e4
LT
3103}
3104
3105static void netlink_seq_stop(struct seq_file *seq, void *v)
3106{
56d28b1e
HX
3107 struct nl_seq_iter *iter = seq->private;
3108
3109 if (iter->link >= MAX_LINKS)
3110 return;
3111
3112 netlink_walk_stop(iter);
1da177e4
LT
3113}
3114
3115
3116static int netlink_seq_show(struct seq_file *seq, void *v)
3117{
658cb354 3118 if (v == SEQ_START_TOKEN) {
1da177e4
LT
3119 seq_puts(seq,
3120 "sk Eth Pid Groups "
cf0aa4e0 3121 "Rmem Wmem Dump Locks Drops Inode\n");
658cb354 3122 } else {
1da177e4
LT
3123 struct sock *s = v;
3124 struct netlink_sock *nlk = nlk_sk(s);
3125
16b304f3 3126 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
1da177e4
LT
3127 s,
3128 s->sk_protocol,
15e47304 3129 nlk->portid,
513c2500 3130 nlk->groups ? (u32)nlk->groups[0] : 0,
31e6d363
ED
3131 sk_rmem_alloc_get(s),
3132 sk_wmem_alloc_get(s),
16b304f3 3133 nlk->cb_running,
38938bfe 3134 atomic_read(&s->sk_refcnt),
cf0aa4e0
MY
3135 atomic_read(&s->sk_drops),
3136 sock_i_ino(s)
1da177e4
LT
3137 );
3138
3139 }
3140 return 0;
3141}
3142
56b3d975 3143static const struct seq_operations netlink_seq_ops = {
1da177e4
LT
3144 .start = netlink_seq_start,
3145 .next = netlink_seq_next,
3146 .stop = netlink_seq_stop,
3147 .show = netlink_seq_show,
3148};
3149
3150
3151static int netlink_seq_open(struct inode *inode, struct file *file)
3152{
e372c414
DL
3153 return seq_open_net(inode, file, &netlink_seq_ops,
3154 sizeof(struct nl_seq_iter));
b4b51029
EB
3155}
3156
da7071d7 3157static const struct file_operations netlink_seq_fops = {
1da177e4
LT
3158 .owner = THIS_MODULE,
3159 .open = netlink_seq_open,
3160 .read = seq_read,
3161 .llseek = seq_lseek,
e372c414 3162 .release = seq_release_net,
1da177e4
LT
3163};
3164
3165#endif
3166
3167int netlink_register_notifier(struct notifier_block *nb)
3168{
e041c683 3169 return atomic_notifier_chain_register(&netlink_chain, nb);
1da177e4 3170}
6ac552fd 3171EXPORT_SYMBOL(netlink_register_notifier);
1da177e4
LT
3172
3173int netlink_unregister_notifier(struct notifier_block *nb)
3174{
e041c683 3175 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1da177e4 3176}
6ac552fd 3177EXPORT_SYMBOL(netlink_unregister_notifier);
746fac4d 3178
90ddc4f0 3179static const struct proto_ops netlink_ops = {
1da177e4
LT
3180 .family = PF_NETLINK,
3181 .owner = THIS_MODULE,
3182 .release = netlink_release,
3183 .bind = netlink_bind,
3184 .connect = netlink_connect,
3185 .socketpair = sock_no_socketpair,
3186 .accept = sock_no_accept,
3187 .getname = netlink_getname,
9652e931 3188 .poll = netlink_poll,
1da177e4
LT
3189 .ioctl = sock_no_ioctl,
3190 .listen = sock_no_listen,
3191 .shutdown = sock_no_shutdown,
9a4595bc
PM
3192 .setsockopt = netlink_setsockopt,
3193 .getsockopt = netlink_getsockopt,
1da177e4
LT
3194 .sendmsg = netlink_sendmsg,
3195 .recvmsg = netlink_recvmsg,
ccdfcc39 3196 .mmap = netlink_mmap,
1da177e4
LT
3197 .sendpage = sock_no_sendpage,
3198};
3199
ec1b4cf7 3200static const struct net_proto_family netlink_family_ops = {
1da177e4
LT
3201 .family = PF_NETLINK,
3202 .create = netlink_create,
3203 .owner = THIS_MODULE, /* for consistency 8) */
3204};
3205
4665079c 3206static int __net_init netlink_net_init(struct net *net)
b4b51029
EB
3207{
3208#ifdef CONFIG_PROC_FS
d4beaa66 3209 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
b4b51029
EB
3210 return -ENOMEM;
3211#endif
3212 return 0;
3213}
3214
4665079c 3215static void __net_exit netlink_net_exit(struct net *net)
b4b51029
EB
3216{
3217#ifdef CONFIG_PROC_FS
ece31ffd 3218 remove_proc_entry("netlink", net->proc_net);
b4b51029
EB
3219#endif
3220}
3221
b963ea89
DM
3222static void __init netlink_add_usersock_entry(void)
3223{
5c398dc8 3224 struct listeners *listeners;
b963ea89
DM
3225 int groups = 32;
3226
5c398dc8 3227 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
b963ea89 3228 if (!listeners)
5c398dc8 3229 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
b963ea89
DM
3230
3231 netlink_table_grab();
3232
3233 nl_table[NETLINK_USERSOCK].groups = groups;
5c398dc8 3234 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
b963ea89
DM
3235 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3236 nl_table[NETLINK_USERSOCK].registered = 1;
9785e10a 3237 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
b963ea89
DM
3238
3239 netlink_table_ungrab();
3240}
3241
022cbae6 3242static struct pernet_operations __net_initdata netlink_net_ops = {
b4b51029
EB
3243 .init = netlink_net_init,
3244 .exit = netlink_net_exit,
3245};
3246
49f7b33e 3247static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
c428ecd1
HX
3248{
3249 const struct netlink_sock *nlk = data;
3250 struct netlink_compare_arg arg;
3251
3252 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
11b58ba1 3253 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
c428ecd1
HX
3254}
3255
3256static const struct rhashtable_params netlink_rhashtable_params = {
3257 .head_offset = offsetof(struct netlink_sock, node),
3258 .key_len = netlink_compare_arg_len,
c428ecd1
HX
3259 .obj_hashfn = netlink_hash,
3260 .obj_cmpfn = netlink_compare,
b5e2c150 3261 .automatic_shrinking = true,
c428ecd1
HX
3262};
3263
1da177e4
LT
3264static int __init netlink_proto_init(void)
3265{
1da177e4 3266 int i;
1da177e4
LT
3267 int err = proto_register(&netlink_proto, 0);
3268
3269 if (err != 0)
3270 goto out;
3271
fab25745 3272 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
1da177e4 3273
0da974f4 3274 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
fab2caf6
AM
3275 if (!nl_table)
3276 goto panic;
1da177e4 3277
1da177e4 3278 for (i = 0; i < MAX_LINKS; i++) {
c428ecd1
HX
3279 if (rhashtable_init(&nl_table[i].hash,
3280 &netlink_rhashtable_params) < 0) {
e341694e
TG
3281 while (--i > 0)
3282 rhashtable_destroy(&nl_table[i].hash);
1da177e4 3283 kfree(nl_table);
fab2caf6 3284 goto panic;
1da177e4 3285 }
1da177e4
LT
3286 }
3287
bcbde0d4
DB
3288 INIT_LIST_HEAD(&netlink_tap_all);
3289
b963ea89
DM
3290 netlink_add_usersock_entry();
3291
1da177e4 3292 sock_register(&netlink_family_ops);
b4b51029 3293 register_pernet_subsys(&netlink_net_ops);
746fac4d 3294 /* The netlink device handler may be needed early. */
1da177e4
LT
3295 rtnetlink_init();
3296out:
3297 return err;
fab2caf6
AM
3298panic:
3299 panic("netlink_init: Cannot allocate nl_table\n");
1da177e4
LT
3300}
3301
1da177e4 3302core_initcall(netlink_proto_init);