netlink: implement memory mapped sendmsg()
net/netlink/af_netlink.c
1/*
2 * NETLINK Kernel-user communication protocol.
3 *
4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13 * added netlink_proto_exit
14 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15 * use nlk_sk, as sk->protinfo is on a diet 8)
16 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17 * - inc module use count of module that owns
18 * the kernel socket in case userspace opens
19 * socket of same protocol
20 * - remove all module support, since netlink is
21 * mandatory if CONFIG_NET=y these days
22 */
23
24#include <linux/module.h>
25
26#include <linux/capability.h>
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/signal.h>
30#include <linux/sched.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/stat.h>
34#include <linux/socket.h>
35#include <linux/un.h>
36#include <linux/fcntl.h>
37#include <linux/termios.h>
38#include <linux/sockios.h>
39#include <linux/net.h>
40#include <linux/fs.h>
41#include <linux/slab.h>
42#include <asm/uaccess.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/rtnetlink.h>
46#include <linux/proc_fs.h>
47#include <linux/seq_file.h>
48#include <linux/notifier.h>
49#include <linux/security.h>
50#include <linux/jhash.h>
51#include <linux/jiffies.h>
52#include <linux/random.h>
53#include <linux/bitops.h>
54#include <linux/mm.h>
55#include <linux/types.h>
56#include <linux/audit.h>
57#include <linux/mutex.h>
58#include <linux/vmalloc.h>
59#include <asm/cacheflush.h>
60
61#include <net/net_namespace.h>
62#include <net/sock.h>
63#include <net/scm.h>
64#include <net/netlink.h>
65
66#include "af_netlink.h"
67
68struct listeners {
69 struct rcu_head rcu;
70 unsigned long masks[0];
71};
72
73/* state bits */
74#define NETLINK_CONGESTED 0x0
75
76/* flags */
77#define NETLINK_KERNEL_SOCKET 0x1
78#define NETLINK_RECV_PKTINFO 0x2
79#define NETLINK_BROADCAST_SEND_ERROR 0x4
80#define NETLINK_RECV_NO_ENOBUFS 0x8
81
82static inline int netlink_is_kernel(struct sock *sk)
83{
84 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
85}
86
87struct netlink_table *nl_table;
88EXPORT_SYMBOL_GPL(nl_table);
89
90static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
91
92static int netlink_dump(struct sock *sk);
93static void netlink_skb_destructor(struct sk_buff *skb);
94
95DEFINE_RWLOCK(nl_table_lock);
96EXPORT_SYMBOL_GPL(nl_table_lock);
97static atomic_t nl_table_users = ATOMIC_INIT(0);
98
99#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
100
101static ATOMIC_NOTIFIER_HEAD(netlink_chain);
102
103static inline u32 netlink_group_mask(u32 group)
104{
105 return group ? 1 << (group - 1) : 0;
106}
107
108static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
109{
110 return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
111}
112
113#ifdef CONFIG_NETLINK_MMAP
114static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
115{
116 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
117}
118
119static bool netlink_tx_is_mmaped(struct sock *sk)
120{
121 return nlk_sk(sk)->tx_ring.pg_vec != NULL;
122}
123
124static __pure struct page *pgvec_to_page(const void *addr)
125{
126 if (is_vmalloc_addr(addr))
127 return vmalloc_to_page(addr);
128 else
129 return virt_to_page(addr);
130}
131
132static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
133{
134 unsigned int i;
135
136 for (i = 0; i < len; i++) {
137 if (pg_vec[i] != NULL) {
138 if (is_vmalloc_addr(pg_vec[i]))
139 vfree(pg_vec[i]);
140 else
141 free_pages((unsigned long)pg_vec[i], order);
142 }
143 }
144 kfree(pg_vec);
145}
146
147static void *alloc_one_pg_vec_page(unsigned long order)
148{
149 void *buffer;
150 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
151 __GFP_NOWARN | __GFP_NORETRY;
152
153 buffer = (void *)__get_free_pages(gfp_flags, order);
154 if (buffer != NULL)
155 return buffer;
156
157 buffer = vzalloc((1 << order) * PAGE_SIZE);
158 if (buffer != NULL)
159 return buffer;
160
161 gfp_flags &= ~__GFP_NORETRY;
162 return (void *)__get_free_pages(gfp_flags, order);
163}
164
165static void **alloc_pg_vec(struct netlink_sock *nlk,
166 struct nl_mmap_req *req, unsigned int order)
167{
168 unsigned int block_nr = req->nm_block_nr;
169 unsigned int i;
170 void **pg_vec, *ptr;
171
172 pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
173 if (pg_vec == NULL)
174 return NULL;
175
176 for (i = 0; i < block_nr; i++) {
177 pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
178 if (pg_vec[i] == NULL)
179 goto err1;
180 }
181
182 return pg_vec;
183err1:
184 free_pg_vec(pg_vec, order, block_nr);
185 return NULL;
186}
187
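/* Editor's note (illustrative sketch, not part of the original file): a
 * userspace process is expected to size a ring via setsockopt() before
 * mmap()ing it, roughly:
 *
 *	struct nl_mmap_req req = {
 *		.nm_block_size	= 4096,	 // multiple of PAGE_SIZE
 *		.nm_block_nr	= 64,
 *		.nm_frame_size	= 2048,	 // multiple of NL_MMAP_MSG_ALIGNMENT
 *		.nm_frame_nr	= 64 * 4096 / 2048,
 *	};
 *	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, 2 * 64 * 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *
 * netlink_set_ring() below validates exactly these constraints.
 */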
188static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
189 bool closing, bool tx_ring)
190{
191 struct netlink_sock *nlk = nlk_sk(sk);
192 struct netlink_ring *ring;
193 struct sk_buff_head *queue;
194 void **pg_vec = NULL;
195 unsigned int order = 0;
196 int err;
197
198 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
199 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
200
201 if (!closing) {
202 if (atomic_read(&nlk->mapped))
203 return -EBUSY;
204 if (atomic_read(&ring->pending))
205 return -EBUSY;
206 }
207
208 if (req->nm_block_nr) {
209 if (ring->pg_vec != NULL)
210 return -EBUSY;
211
212 if ((int)req->nm_block_size <= 0)
213 return -EINVAL;
214 if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
215 return -EINVAL;
216 if (req->nm_frame_size < NL_MMAP_HDRLEN)
217 return -EINVAL;
218 if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
219 return -EINVAL;
220
221 ring->frames_per_block = req->nm_block_size /
222 req->nm_frame_size;
223 if (ring->frames_per_block == 0)
224 return -EINVAL;
225 if (ring->frames_per_block * req->nm_block_nr !=
226 req->nm_frame_nr)
227 return -EINVAL;
228
229 order = get_order(req->nm_block_size);
230 pg_vec = alloc_pg_vec(nlk, req, order);
231 if (pg_vec == NULL)
232 return -ENOMEM;
233 } else {
234 if (req->nm_frame_nr)
235 return -EINVAL;
236 }
237
238 err = -EBUSY;
239 mutex_lock(&nlk->pg_vec_lock);
240 if (closing || atomic_read(&nlk->mapped) == 0) {
241 err = 0;
242 spin_lock_bh(&queue->lock);
243
244 ring->frame_max = req->nm_frame_nr - 1;
245 ring->head = 0;
246 ring->frame_size = req->nm_frame_size;
247 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
248
249 swap(ring->pg_vec_len, req->nm_block_nr);
250 swap(ring->pg_vec_order, order);
251 swap(ring->pg_vec, pg_vec);
252
253 __skb_queue_purge(queue);
254 spin_unlock_bh(&queue->lock);
255
256 WARN_ON(atomic_read(&nlk->mapped));
257 }
258 mutex_unlock(&nlk->pg_vec_lock);
259
260 if (pg_vec)
261 free_pg_vec(pg_vec, order, req->nm_block_nr);
262 return err;
263}
264
265static void netlink_mm_open(struct vm_area_struct *vma)
266{
267 struct file *file = vma->vm_file;
268 struct socket *sock = file->private_data;
269 struct sock *sk = sock->sk;
270
271 if (sk)
272 atomic_inc(&nlk_sk(sk)->mapped);
273}
274
275static void netlink_mm_close(struct vm_area_struct *vma)
276{
277 struct file *file = vma->vm_file;
278 struct socket *sock = file->private_data;
279 struct sock *sk = sock->sk;
280
281 if (sk)
282 atomic_dec(&nlk_sk(sk)->mapped);
283}
284
285static const struct vm_operations_struct netlink_mmap_ops = {
286 .open = netlink_mm_open,
287 .close = netlink_mm_close,
288};
289
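/* Editor's note: one VMA must cover both rings back to back, RX pages first
 * and TX pages immediately after; netlink_mmap() rejects any other mapping
 * length (and any non-zero pgoff) by comparing against the combined size.
 */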
290static int netlink_mmap(struct file *file, struct socket *sock,
291 struct vm_area_struct *vma)
292{
293 struct sock *sk = sock->sk;
294 struct netlink_sock *nlk = nlk_sk(sk);
295 struct netlink_ring *ring;
296 unsigned long start, size, expected;
297 unsigned int i;
298 int err = -EINVAL;
299
300 if (vma->vm_pgoff)
301 return -EINVAL;
302
303 mutex_lock(&nlk->pg_vec_lock);
304
305 expected = 0;
306 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
307 if (ring->pg_vec == NULL)
308 continue;
309 expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
310 }
311
312 if (expected == 0)
313 goto out;
314
315 size = vma->vm_end - vma->vm_start;
316 if (size != expected)
317 goto out;
318
319 start = vma->vm_start;
320 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
321 if (ring->pg_vec == NULL)
322 continue;
323
324 for (i = 0; i < ring->pg_vec_len; i++) {
325 struct page *page;
326 void *kaddr = ring->pg_vec[i];
327 unsigned int pg_num;
328
329 for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
330 page = pgvec_to_page(kaddr);
331 err = vm_insert_page(vma, start, page);
332 if (err < 0)
333 goto out;
334 start += PAGE_SIZE;
335 kaddr += PAGE_SIZE;
336 }
337 }
338 }
339
340 atomic_inc(&nlk->mapped);
341 vma->vm_ops = &netlink_mmap_ops;
342 err = 0;
343out:
344 mutex_unlock(&nlk->pg_vec_lock);
345 return err;
346}
347
348static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
349{
350#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
351 struct page *p_start, *p_end;
352
353 /* First page is flushed through netlink_{get,set}_status */
354 p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
355 p_end = pgvec_to_page((void *)hdr + NL_MMAP_MSG_HDRLEN + hdr->nm_len - 1);
356 while (p_start <= p_end) {
357 flush_dcache_page(p_start);
358 p_start++;
359 }
360#endif
361}
362
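/* Editor's note: nm_status hands frame ownership back and forth between
 * kernel and userspace. The smp_rmb()/smp_wmb() pair below orders the
 * status access against reads and writes of the frame payload; userspace
 * is expected to issue the matching barriers on its side.
 */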
363static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
364{
365 smp_rmb();
366 flush_dcache_page(pgvec_to_page(hdr));
367 return hdr->nm_status;
368}
369
370static void netlink_set_status(struct nl_mmap_hdr *hdr,
371 enum nl_mmap_status status)
372{
373 hdr->nm_status = status;
374 flush_dcache_page(pgvec_to_page(hdr));
375 smp_wmb();
376}
377
378static struct nl_mmap_hdr *
379__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
380{
381 unsigned int pg_vec_pos, frame_off;
382
383 pg_vec_pos = pos / ring->frames_per_block;
384 frame_off = pos % ring->frames_per_block;
385
386 return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
387}
388
389static struct nl_mmap_hdr *
390netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
391 enum nl_mmap_status status)
392{
393 struct nl_mmap_hdr *hdr;
394
395 hdr = __netlink_lookup_frame(ring, pos);
396 if (netlink_get_status(hdr) != status)
397 return NULL;
398
399 return hdr;
400}
401
402static struct nl_mmap_hdr *
403netlink_current_frame(const struct netlink_ring *ring,
404 enum nl_mmap_status status)
405{
406 return netlink_lookup_frame(ring, ring->head, status);
407}
408
409static struct nl_mmap_hdr *
410netlink_previous_frame(const struct netlink_ring *ring,
411 enum nl_mmap_status status)
412{
413 unsigned int prev;
414
415 prev = ring->head ? ring->head - 1 : ring->frame_max;
416 return netlink_lookup_frame(ring, prev, status);
417}
418
419static void netlink_increment_head(struct netlink_ring *ring)
420{
421 ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
422}
423
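/* Editor's note: advance the ring head past frames userspace marked
 * NL_MMAP_STATUS_SKIP, stopping at the first unused frame or after one
 * full revolution of the ring.
 */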
424static void netlink_forward_ring(struct netlink_ring *ring)
425{
426 unsigned int head = ring->head, pos = head;
427 const struct nl_mmap_hdr *hdr;
428
429 do {
430 hdr = __netlink_lookup_frame(ring, pos);
431 if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
432 break;
433 if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
434 break;
435 netlink_increment_head(ring);
436 } while (ring->head != head);
437}
438
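/* Editor's note: mmaped frames never enter sk_receive_queue, so
 * datagram_poll() alone cannot report them; the rings are inspected
 * directly under the queue locks to compute POLLIN/POLLOUT, and a pending
 * dump is flushed into the RX ring first.
 */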
439static unsigned int netlink_poll(struct file *file, struct socket *sock,
440 poll_table *wait)
441{
442 struct sock *sk = sock->sk;
443 struct netlink_sock *nlk = nlk_sk(sk);
444 unsigned int mask;
445
446 if (nlk->cb != NULL && nlk->rx_ring.pg_vec != NULL)
447 netlink_dump(sk);
448
449 mask = datagram_poll(file, sock, wait);
450
451 spin_lock_bh(&sk->sk_receive_queue.lock);
452 if (nlk->rx_ring.pg_vec) {
453 netlink_forward_ring(&nlk->rx_ring);
454 if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
455 mask |= POLLIN | POLLRDNORM;
456 }
457 spin_unlock_bh(&sk->sk_receive_queue.lock);
458
459 spin_lock_bh(&sk->sk_write_queue.lock);
460 if (nlk->tx_ring.pg_vec) {
461 if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
462 mask |= POLLOUT | POLLWRNORM;
463 }
464 spin_unlock_bh(&sk->sk_write_queue.lock);
465
466 return mask;
467}
468
469static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
470{
471 return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
472}
473
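/* Editor's note: this builds a data-less skb whose head points straight
 * into the mmaped frame (zero-copy); netlink_mmap_hdr() above recovers
 * the frame header from such an skb via the fixed NL_MMAP_HDRLEN offset.
 */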
474static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
475 struct netlink_ring *ring,
476 struct nl_mmap_hdr *hdr)
477{
478 unsigned int size;
479 void *data;
480
481 size = ring->frame_size - NL_MMAP_HDRLEN;
482 data = (void *)hdr + NL_MMAP_HDRLEN;
483
484 skb->head = data;
485 skb->data = data;
486 skb_reset_tail_pointer(skb);
487 skb->end = skb->tail + size;
488 skb->len = 0;
489
490 skb->destructor = netlink_skb_destructor;
491 NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
492 NETLINK_CB(skb).sk = sk;
493}
494
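/* Editor's note (illustrative sketch): to transmit, userspace fills the
 * current TX frame, flips its status to NL_MMAP_STATUS_VALID and calls
 * sendmsg() with a NULL iov base, roughly:
 *
 *	hdr->nm_len = len;	// after writing the payload
 *	hdr->nm_status = NL_MMAP_STATUS_VALID;
 *	sendmsg(fd, &msg, 0);	// msg_iov->iov_base == NULL
 *
 * The loop below then consumes every VALID frame in ring order.
 */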
495static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
496 u32 dst_portid, u32 dst_group,
497 struct sock_iocb *siocb)
498{
499 struct netlink_sock *nlk = nlk_sk(sk);
500 struct netlink_ring *ring;
501 struct nl_mmap_hdr *hdr;
502 struct sk_buff *skb;
503 unsigned int maxlen;
504 bool excl = true;
505 int err = 0, len = 0;
506
507 /* Netlink messages are validated by the receiver before processing.
508 * In order to avoid userspace changing the contents of the message
509 * after validation, the socket and the ring may only be used by a
510 * single process, otherwise we fall back to copying.
511 */
512 if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
513 atomic_read(&nlk->mapped) > 1)
514 excl = false;
515
516 mutex_lock(&nlk->pg_vec_lock);
517
518 ring = &nlk->tx_ring;
519 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
520
521 do {
522 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
523 if (hdr == NULL) {
524 if (!(msg->msg_flags & MSG_DONTWAIT) &&
525 atomic_read(&nlk->tx_ring.pending))
526 schedule();
527 continue;
528 }
529 if (hdr->nm_len > maxlen) {
530 err = -EINVAL;
531 goto out;
532 }
533
534 netlink_frame_flush_dcache(hdr);
535
536 if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
537 skb = alloc_skb_head(GFP_KERNEL);
538 if (skb == NULL) {
539 err = -ENOBUFS;
540 goto out;
541 }
542 sock_hold(sk);
543 netlink_ring_setup_skb(skb, sk, ring, hdr);
544 NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
545 __skb_put(skb, hdr->nm_len);
546 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
547 atomic_inc(&ring->pending);
548 } else {
549 skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
550 if (skb == NULL) {
551 err = -ENOBUFS;
552 goto out;
553 }
554 __skb_put(skb, hdr->nm_len);
555 memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
556 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
557 }
558
559 netlink_increment_head(ring);
560
561 NETLINK_CB(skb).portid = nlk->portid;
562 NETLINK_CB(skb).dst_group = dst_group;
563 NETLINK_CB(skb).creds = siocb->scm->creds;
564
565 err = security_netlink_send(sk, skb);
566 if (err) {
567 kfree_skb(skb);
568 goto out;
569 }
570
571 if (unlikely(dst_group)) {
572 atomic_inc(&skb->users);
573 netlink_broadcast(sk, skb, dst_portid, dst_group,
574 GFP_KERNEL);
575 }
576 err = netlink_unicast(sk, skb, dst_portid,
577 msg->msg_flags & MSG_DONTWAIT);
578 if (err < 0)
579 goto out;
580 len += err;
581
582 } while (hdr != NULL ||
583 (!(msg->msg_flags & MSG_DONTWAIT) &&
584 atomic_read(&nlk->tx_ring.pending)));
585
586 if (len > 0)
587 err = len;
588out:
589 mutex_unlock(&nlk->pg_vec_lock);
590 return err;
591}
592#else /* CONFIG_NETLINK_MMAP */
593#define netlink_skb_is_mmaped(skb) false
594#define netlink_tx_is_mmaped(sk) false
595#define netlink_mmap sock_no_mmap
596#define netlink_poll datagram_poll
597#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
598#endif /* CONFIG_NETLINK_MMAP */
599
600static void netlink_destroy_callback(struct netlink_callback *cb)
601{
602 kfree_skb(cb->skb);
603 kfree(cb);
604}
605
606static void netlink_consume_callback(struct netlink_callback *cb)
607{
608 consume_skb(cb->skb);
609 kfree(cb);
610}
611
612static void netlink_skb_destructor(struct sk_buff *skb)
613{
614#ifdef CONFIG_NETLINK_MMAP
615 struct nl_mmap_hdr *hdr;
616 struct netlink_ring *ring;
617 struct sock *sk;
618
619 /* If a packet from the kernel to userspace was freed because of an
620 * error without being delivered to userspace, the kernel must reset
621 * the status. In the direction userspace to kernel, the status is
622 * always reset here after the packet was processed and freed.
623 */
624 if (netlink_skb_is_mmaped(skb)) {
625 hdr = netlink_mmap_hdr(skb);
626 sk = NETLINK_CB(skb).sk;
627
628 if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
629 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
630 ring = &nlk_sk(sk)->tx_ring;
631 } else {
632 if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
633 hdr->nm_len = 0;
634 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
635 }
636 ring = &nlk_sk(sk)->rx_ring;
637 }
638
639 WARN_ON(atomic_read(&ring->pending) == 0);
640 atomic_dec(&ring->pending);
641 sock_put(sk);
642
643 skb->data = NULL;
644 }
645#endif
646 if (skb->sk != NULL)
647 sock_rfree(skb);
648}
649
650static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
651{
652 WARN_ON(skb->sk != NULL);
653 skb->sk = sk;
654 skb->destructor = netlink_skb_destructor;
655 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
656 sk_mem_charge(sk, skb->truesize);
657}
658
659static void netlink_sock_destruct(struct sock *sk)
660{
661 struct netlink_sock *nlk = nlk_sk(sk);
662
663 if (nlk->cb) {
664 if (nlk->cb->done)
665 nlk->cb->done(nlk->cb);
666
667 module_put(nlk->cb->module);
668 netlink_destroy_callback(nlk->cb);
669 }
670
671 skb_queue_purge(&sk->sk_receive_queue);
672#ifdef CONFIG_NETLINK_MMAP
673 if (1) {
674 struct nl_mmap_req req;
675
676 memset(&req, 0, sizeof(req));
677 if (nlk->rx_ring.pg_vec)
678 netlink_set_ring(sk, &req, true, false);
679 memset(&req, 0, sizeof(req));
680 if (nlk->tx_ring.pg_vec)
681 netlink_set_ring(sk, &req, true, true);
682 }
683#endif /* CONFIG_NETLINK_MMAP */
684
685 if (!sock_flag(sk, SOCK_DEAD)) {
686 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
687 return;
688 }
689
690 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
691 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
692 WARN_ON(nlk_sk(sk)->groups);
693}
694
695/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
696 * SMP. Look, when several writers sleep and reader wakes them up, all but one
697 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
698 * this, _but_ remember, it adds useless work on UP machines.
699 */
700
701void netlink_table_grab(void)
702 __acquires(nl_table_lock)
703{
704 might_sleep();
705
706 write_lock_irq(&nl_table_lock);
707
708 if (atomic_read(&nl_table_users)) {
709 DECLARE_WAITQUEUE(wait, current);
710
711 add_wait_queue_exclusive(&nl_table_wait, &wait);
712 for (;;) {
713 set_current_state(TASK_UNINTERRUPTIBLE);
714 if (atomic_read(&nl_table_users) == 0)
715 break;
716 write_unlock_irq(&nl_table_lock);
717 schedule();
718 write_lock_irq(&nl_table_lock);
719 }
720
721 __set_current_state(TASK_RUNNING);
722 remove_wait_queue(&nl_table_wait, &wait);
723 }
724}
725
726void netlink_table_ungrab(void)
727 __releases(nl_table_lock)
728{
729 write_unlock_irq(&nl_table_lock);
730 wake_up(&nl_table_wait);
731}
732
733static inline void
734netlink_lock_table(void)
735{
736 /* read_lock() synchronizes us to netlink_table_grab */
737
738 read_lock(&nl_table_lock);
739 atomic_inc(&nl_table_users);
740 read_unlock(&nl_table_lock);
741}
742
743static inline void
744netlink_unlock_table(void)
745{
746 if (atomic_dec_and_test(&nl_table_users))
747 wake_up(&nl_table_wait);
748}
749
750static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
751{
752 struct nl_portid_hash *hash = &nl_table[protocol].hash;
753 struct hlist_head *head;
754 struct sock *sk;
755
756 read_lock(&nl_table_lock);
757 head = nl_portid_hashfn(hash, portid);
758 sk_for_each(sk, head) {
759 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
760 sock_hold(sk);
761 goto found;
762 }
763 }
764 sk = NULL;
765found:
766 read_unlock(&nl_table_lock);
767 return sk;
768}
769
770static struct hlist_head *nl_portid_hash_zalloc(size_t size)
771{
772 if (size <= PAGE_SIZE)
773 return kzalloc(size, GFP_ATOMIC);
774 else
775 return (struct hlist_head *)
776 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
777 get_order(size));
778}
779
780static void nl_portid_hash_free(struct hlist_head *table, size_t size)
781{
782 if (size <= PAGE_SIZE)
783 kfree(table);
784 else
785 free_pages((unsigned long)table, get_order(size));
786}
787
788static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
789{
790 unsigned int omask, mask, shift;
791 size_t osize, size;
792 struct hlist_head *otable, *table;
793 int i;
794
795 omask = mask = hash->mask;
796 osize = size = (mask + 1) * sizeof(*table);
797 shift = hash->shift;
798
799 if (grow) {
800 if (++shift > hash->max_shift)
801 return 0;
802 mask = mask * 2 + 1;
803 size *= 2;
804 }
805
806 table = nl_portid_hash_zalloc(size);
807 if (!table)
808 return 0;
809
810 otable = hash->table;
811 hash->table = table;
812 hash->mask = mask;
813 hash->shift = shift;
814 get_random_bytes(&hash->rnd, sizeof(hash->rnd));
815
816 for (i = 0; i <= omask; i++) {
817 struct sock *sk;
818 struct hlist_node *tmp;
819
820 sk_for_each_safe(sk, tmp, &otable[i])
821 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
822 }
823
824 nl_portid_hash_free(otable, osize);
825 hash->rehash_time = jiffies + 10 * 60 * HZ;
826 return 1;
827}
828
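/* Editor's note: grow the hash once the average chain length exceeds one
 * entry; otherwise, if this lookup walked a longer-than-average chain,
 * re-seed at the same size, rate-limited to once every ten minutes.
 */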
829static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
830{
831 int avg = hash->entries >> hash->shift;
832
833 if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
834 return 1;
835
836 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
837 nl_portid_hash_rehash(hash, 0);
838 return 1;
839 }
840
841 return 0;
842}
843
844static const struct proto_ops netlink_ops;
845
846static void
847netlink_update_listeners(struct sock *sk)
848{
849 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
850 unsigned long mask;
851 unsigned int i;
852 struct listeners *listeners;
853
854 listeners = nl_deref_protected(tbl->listeners);
855 if (!listeners)
856 return;
857
858 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
859 mask = 0;
860 sk_for_each_bound(sk, &tbl->mc_list) {
861 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
862 mask |= nlk_sk(sk)->groups[i];
863 }
864 listeners->masks[i] = mask;
865 }
866 /* this function is only called with the netlink table "grabbed", which
867 * makes sure updates are visible before bind or setsockopt return. */
868}
869
870static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
871{
872 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
873 struct hlist_head *head;
874 int err = -EADDRINUSE;
875 struct sock *osk;
876 int len;
877
878 netlink_table_grab();
879 head = nl_portid_hashfn(hash, portid);
880 len = 0;
881 sk_for_each(osk, head) {
882 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
883 break;
884 len++;
885 }
886 if (osk)
887 goto err;
888
889 err = -EBUSY;
890 if (nlk_sk(sk)->portid)
891 goto err;
892
893 err = -ENOMEM;
894 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
895 goto err;
896
897 if (len && nl_portid_hash_dilute(hash, len))
898 head = nl_portid_hashfn(hash, portid);
899 hash->entries++;
900 nlk_sk(sk)->portid = portid;
901 sk_add_node(sk, head);
902 err = 0;
903
904err:
905 netlink_table_ungrab();
906 return err;
907}
908
909static void netlink_remove(struct sock *sk)
910{
911 netlink_table_grab();
912 if (sk_del_node_init(sk))
913 nl_table[sk->sk_protocol].hash.entries--;
914 if (nlk_sk(sk)->subscriptions)
915 __sk_del_bind_node(sk);
916 netlink_table_ungrab();
917}
918
919static struct proto netlink_proto = {
920 .name = "NETLINK",
921 .owner = THIS_MODULE,
922 .obj_size = sizeof(struct netlink_sock),
923};
924
925static int __netlink_create(struct net *net, struct socket *sock,
926 struct mutex *cb_mutex, int protocol)
927{
928 struct sock *sk;
929 struct netlink_sock *nlk;
930
931 sock->ops = &netlink_ops;
932
933 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
934 if (!sk)
935 return -ENOMEM;
936
937 sock_init_data(sock, sk);
938
939 nlk = nlk_sk(sk);
940 if (cb_mutex) {
941 nlk->cb_mutex = cb_mutex;
942 } else {
943 nlk->cb_mutex = &nlk->cb_def_mutex;
944 mutex_init(nlk->cb_mutex);
945 }
946 init_waitqueue_head(&nlk->wait);
947#ifdef CONFIG_NETLINK_MMAP
948 mutex_init(&nlk->pg_vec_lock);
949#endif
950
951 sk->sk_destruct = netlink_sock_destruct;
952 sk->sk_protocol = protocol;
953 return 0;
954}
955
956static int netlink_create(struct net *net, struct socket *sock, int protocol,
957 int kern)
958{
959 struct module *module = NULL;
960 struct mutex *cb_mutex;
961 struct netlink_sock *nlk;
962 void (*bind)(int group);
963 int err = 0;
964
965 sock->state = SS_UNCONNECTED;
966
967 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
968 return -ESOCKTNOSUPPORT;
969
970 if (protocol < 0 || protocol >= MAX_LINKS)
971 return -EPROTONOSUPPORT;
972
973 netlink_lock_table();
974#ifdef CONFIG_MODULES
975 if (!nl_table[protocol].registered) {
976 netlink_unlock_table();
977 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
978 netlink_lock_table();
979 }
980#endif
981 if (nl_table[protocol].registered &&
982 try_module_get(nl_table[protocol].module))
983 module = nl_table[protocol].module;
984 else
985 err = -EPROTONOSUPPORT;
986 cb_mutex = nl_table[protocol].cb_mutex;
987 bind = nl_table[protocol].bind;
988 netlink_unlock_table();
989
990 if (err < 0)
991 goto out;
992
993 err = __netlink_create(net, sock, cb_mutex, protocol);
994 if (err < 0)
995 goto out_module;
996
997 local_bh_disable();
998 sock_prot_inuse_add(net, &netlink_proto, 1);
999 local_bh_enable();
1000
1001 nlk = nlk_sk(sock->sk);
1002 nlk->module = module;
1003 nlk->netlink_bind = bind;
1004out:
1005 return err;
1006
1007out_module:
1008 module_put(module);
1009 goto out;
1010}
1011
1012static int netlink_release(struct socket *sock)
1013{
1014 struct sock *sk = sock->sk;
1015 struct netlink_sock *nlk;
1016
1017 if (!sk)
1018 return 0;
1019
1020 netlink_remove(sk);
1021 sock_orphan(sk);
1022 nlk = nlk_sk(sk);
1023
1024 /*
1025 * OK. Socket is unlinked, any packets that arrive now
1026 * will be purged.
1027 */
1028
1029 sock->sk = NULL;
1030 wake_up_interruptible_all(&nlk->wait);
1031
1032 skb_queue_purge(&sk->sk_write_queue);
1033
1034 if (nlk->portid) {
1035 struct netlink_notify n = {
1036 .net = sock_net(sk),
1037 .protocol = sk->sk_protocol,
1038 .portid = nlk->portid,
1039 };
1040 atomic_notifier_call_chain(&netlink_chain,
1041 NETLINK_URELEASE, &n);
1042 }
1043
1044 module_put(nlk->module);
1045
1046 netlink_table_grab();
1047 if (netlink_is_kernel(sk)) {
1048 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1049 if (--nl_table[sk->sk_protocol].registered == 0) {
1050 struct listeners *old;
1051
1052 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1053 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1054 kfree_rcu(old, rcu);
1055 nl_table[sk->sk_protocol].module = NULL;
1056 nl_table[sk->sk_protocol].bind = NULL;
1057 nl_table[sk->sk_protocol].flags = 0;
1058 nl_table[sk->sk_protocol].registered = 0;
1059 }
1060 } else if (nlk->subscriptions) {
1061 netlink_update_listeners(sk);
1062 }
1063 netlink_table_ungrab();
1064
1065 kfree(nlk->groups);
1066 nlk->groups = NULL;
1067
1068 local_bh_disable();
1069 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
1070 local_bh_enable();
1071 sock_put(sk);
1072 return 0;
1073}
1074
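/* Editor's note: autobind first tries the thread group id as the portid;
 * on collision it searches downward through negative values starting at
 * -4097, keeping autobound ids out of the usual positive pid range.
 */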
1075static int netlink_autobind(struct socket *sock)
1076{
1077 struct sock *sk = sock->sk;
1078 struct net *net = sock_net(sk);
1079 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
1080 struct hlist_head *head;
1081 struct sock *osk;
1082 s32 portid = task_tgid_vnr(current);
1083 int err;
1084 static s32 rover = -4097;
1085
1086retry:
1087 cond_resched();
1088 netlink_table_grab();
1089 head = nl_portid_hashfn(hash, portid);
1090 sk_for_each(osk, head) {
1091 if (!net_eq(sock_net(osk), net))
1092 continue;
1093 if (nlk_sk(osk)->portid == portid) {
1094 /* Bind collision, search negative portid values. */
1095 portid = rover--;
1096 if (rover > -4097)
1097 rover = -4097;
1098 netlink_table_ungrab();
1099 goto retry;
1100 }
1101 }
1102 netlink_table_ungrab();
1103
1104 err = netlink_insert(sk, net, portid);
1105 if (err == -EADDRINUSE)
1106 goto retry;
1107
1108 /* If 2 threads race to autobind, that is fine. */
1109 if (err == -EBUSY)
1110 err = 0;
1111
1112 return err;
1113}
1114
1115static inline int netlink_capable(const struct socket *sock, unsigned int flag)
1116{
1117 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
1118 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
1119}
1120
1121static void
1122netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1123{
1124 struct netlink_sock *nlk = nlk_sk(sk);
1125
1126 if (nlk->subscriptions && !subscriptions)
1127 __sk_del_bind_node(sk);
1128 else if (!nlk->subscriptions && subscriptions)
1129 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1130 nlk->subscriptions = subscriptions;
1131}
1132
1133static int netlink_realloc_groups(struct sock *sk)
1134{
1135 struct netlink_sock *nlk = nlk_sk(sk);
1136 unsigned int groups;
1137 unsigned long *new_groups;
1138 int err = 0;
1139
1140 netlink_table_grab();
1141
1142 groups = nl_table[sk->sk_protocol].groups;
1143 if (!nl_table[sk->sk_protocol].registered) {
1144 err = -ENOENT;
1145 goto out_unlock;
1146 }
1147
1148 if (nlk->ngroups >= groups)
1149 goto out_unlock;
1150
1151 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1152 if (new_groups == NULL) {
1153 err = -ENOMEM;
1154 goto out_unlock;
1155 }
1156 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
1157 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1158
1159 nlk->groups = new_groups;
1160 nlk->ngroups = groups;
1161 out_unlock:
1162 netlink_table_ungrab();
1163 return err;
1164}
1165
1166static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1167 int addr_len)
1168{
1169 struct sock *sk = sock->sk;
1170 struct net *net = sock_net(sk);
1171 struct netlink_sock *nlk = nlk_sk(sk);
1172 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1173 int err;
1174
1175 if (addr_len < sizeof(struct sockaddr_nl))
1176 return -EINVAL;
1177
1178 if (nladdr->nl_family != AF_NETLINK)
1179 return -EINVAL;
1180
1181 /* Only superuser is allowed to listen multicasts */
1182 if (nladdr->nl_groups) {
1183 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
1184 return -EPERM;
1185 err = netlink_realloc_groups(sk);
1186 if (err)
1187 return err;
1188 }
1189
1190 if (nlk->portid) {
1191 if (nladdr->nl_pid != nlk->portid)
1192 return -EINVAL;
1193 } else {
1194 err = nladdr->nl_pid ?
1195 netlink_insert(sk, net, nladdr->nl_pid) :
1196 netlink_autobind(sock);
1197 if (err)
1198 return err;
1199 }
1200
1201 if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1202 return 0;
1203
1204 netlink_table_grab();
1205 netlink_update_subscriptions(sk, nlk->subscriptions +
1206 hweight32(nladdr->nl_groups) -
1207 hweight32(nlk->groups[0]));
1208 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
1209 netlink_update_listeners(sk);
1210 netlink_table_ungrab();
1211
1212 if (nlk->netlink_bind && nlk->groups[0]) {
1213 int i;
1214
1215 for (i=0; i<nlk->ngroups; i++) {
1216 if (test_bit(i, nlk->groups))
1217 nlk->netlink_bind(i);
1218 }
1219 }
1220
1221 return 0;
1222}
1223
1224static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1225 int alen, int flags)
1226{
1227 int err = 0;
1228 struct sock *sk = sock->sk;
1229 struct netlink_sock *nlk = nlk_sk(sk);
1230 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1231
1232 if (alen < sizeof(addr->sa_family))
1233 return -EINVAL;
1234
1235 if (addr->sa_family == AF_UNSPEC) {
1236 sk->sk_state = NETLINK_UNCONNECTED;
1237 nlk->dst_portid = 0;
1238 nlk->dst_group = 0;
1239 return 0;
1240 }
1241 if (addr->sa_family != AF_NETLINK)
1242 return -EINVAL;
1243
1244 /* Only superuser is allowed to send multicasts */
1245 if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
1246 return -EPERM;
1247
1248 if (!nlk->portid)
1249 err = netlink_autobind(sock);
1250
1251 if (err == 0) {
1252 sk->sk_state = NETLINK_CONNECTED;
15e47304 1253 nlk->dst_portid = nladdr->nl_pid;
d629b836 1254 nlk->dst_group = ffs(nladdr->nl_groups);
1da177e4
LT
1255 }
1256
1257 return err;
1258}
1259
1260static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1261 int *addr_len, int peer)
1262{
1263 struct sock *sk = sock->sk;
1264 struct netlink_sock *nlk = nlk_sk(sk);
1265 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1266
1267 nladdr->nl_family = AF_NETLINK;
1268 nladdr->nl_pad = 0;
1269 *addr_len = sizeof(*nladdr);
1270
1271 if (peer) {
1272 nladdr->nl_pid = nlk->dst_portid;
1273 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1274 } else {
1275 nladdr->nl_pid = nlk->portid;
1276 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1277 }
1278 return 0;
1279}
1280
1281static void netlink_overrun(struct sock *sk)
1282{
1283 struct netlink_sock *nlk = nlk_sk(sk);
1284
1285 if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
1286 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
1287 sk->sk_err = ENOBUFS;
1288 sk->sk_error_report(sk);
1289 }
1290 }
1291 atomic_inc(&sk->sk_drops);
1292}
1293
1294static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1295{
1296 struct sock *sock;
1297 struct netlink_sock *nlk;
1298
1299 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1300 if (!sock)
1301 return ERR_PTR(-ECONNREFUSED);
1302
1303 /* Don't bother queuing skb if kernel socket has no input function */
1304 nlk = nlk_sk(sock);
1305 if (sock->sk_state == NETLINK_CONNECTED &&
1306 nlk->dst_portid != nlk_sk(ssk)->portid) {
1307 sock_put(sock);
1308 return ERR_PTR(-ECONNREFUSED);
1309 }
1310 return sock;
1311}
1312
1313struct sock *netlink_getsockbyfilp(struct file *filp)
1314{
1315 struct inode *inode = file_inode(filp);
1316 struct sock *sock;
1317
1318 if (!S_ISSOCK(inode->i_mode))
1319 return ERR_PTR(-ENOTSOCK);
1320
1321 sock = SOCKET_I(inode)->sk;
1322 if (sock->sk_family != AF_NETLINK)
1323 return ERR_PTR(-EINVAL);
1324
1325 sock_hold(sock);
1326 return sock;
1327}
1328
1329/*
1330 * Attach a skb to a netlink socket.
1331 * The caller must hold a reference to the destination socket. On error, the
1332 * reference is dropped. The skb is not sent to the destination; all
1333 * error checks are performed and memory in the queue is reserved.
1334 * Return values:
1335 * < 0: error. skb freed, reference to sock dropped.
1336 * 0: continue
1337 * 1: repeat lookup - reference dropped while waiting for socket memory.
1338 */
1339int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1340 long *timeo, struct sock *ssk)
1341{
1342 struct netlink_sock *nlk;
1343
1344 nlk = nlk_sk(sk);
1345
1346 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1347 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1348 !netlink_skb_is_mmaped(skb)) {
1349 DECLARE_WAITQUEUE(wait, current);
1350 if (!*timeo) {
1351 if (!ssk || netlink_is_kernel(ssk))
1352 netlink_overrun(sk);
1353 sock_put(sk);
1354 kfree_skb(skb);
1355 return -EAGAIN;
1356 }
1357
1358 __set_current_state(TASK_INTERRUPTIBLE);
1359 add_wait_queue(&nlk->wait, &wait);
1360
1361 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1362 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1363 !sock_flag(sk, SOCK_DEAD))
1364 *timeo = schedule_timeout(*timeo);
1365
1366 __set_current_state(TASK_RUNNING);
1367 remove_wait_queue(&nlk->wait, &wait);
1368 sock_put(sk);
1369
1370 if (signal_pending(current)) {
1371 kfree_skb(skb);
1372 return sock_intr_errno(*timeo);
1373 }
1374 return 1;
1375 }
1376 netlink_skb_set_owner_r(skb, sk);
1377 return 0;
1378}
1379
1380static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1381{
1382 int len = skb->len;
1383
1384 skb_queue_tail(&sk->sk_receive_queue, skb);
1385 sk->sk_data_ready(sk, len);
1386 return len;
1387}
1388
1389int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1390{
1391 int len = __netlink_sendskb(sk, skb);
1392
1393 sock_put(sk);
1394 return len;
1395}
1396
1397void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1398{
1399 kfree_skb(skb);
1400 sock_put(sk);
1401}
1402
1403static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1404{
1405 int delta;
1406
1407 WARN_ON(skb->sk != NULL);
1408 if (netlink_skb_is_mmaped(skb))
1409 return skb;
1410
1411 delta = skb->end - skb->tail;
1412 if (delta * 2 < skb->truesize)
1413 return skb;
1414
1415 if (skb_shared(skb)) {
1416 struct sk_buff *nskb = skb_clone(skb, allocation);
1417 if (!nskb)
1418 return skb;
1419 consume_skb(skb);
1420 skb = nskb;
1421 }
1422
1423 if (!pskb_expand_head(skb, 0, -delta, allocation))
1424 skb->truesize -= delta;
1425
1426 return skb;
1427}
1428
1429static void netlink_rcv_wake(struct sock *sk)
1430{
1431 struct netlink_sock *nlk = nlk_sk(sk);
1432
1433 if (skb_queue_empty(&sk->sk_receive_queue))
1434 clear_bit(NETLINK_CONGESTED, &nlk->state);
1435 if (!test_bit(NETLINK_CONGESTED, &nlk->state))
1436 wake_up_interruptible(&nlk->wait);
1437}
1438
1439static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1440 struct sock *ssk)
1441{
1442 int ret;
1443 struct netlink_sock *nlk = nlk_sk(sk);
1444
1445 ret = -ECONNREFUSED;
1446 if (nlk->netlink_rcv != NULL) {
1447 ret = skb->len;
1448 netlink_skb_set_owner_r(skb, sk);
1449 NETLINK_CB(skb).sk = ssk;
1450 nlk->netlink_rcv(skb);
1451 consume_skb(skb);
1452 } else {
1453 kfree_skb(skb);
1454 }
1455 sock_put(sk);
1456 return ret;
1457}
1458
1459int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1460 u32 portid, int nonblock)
1461{
1462 struct sock *sk;
1463 int err;
1464 long timeo;
1465
1466 skb = netlink_trim(skb, gfp_any());
1467
1468 timeo = sock_sndtimeo(ssk, nonblock);
1469retry:
1470 sk = netlink_getsockbyportid(ssk, portid);
1471 if (IS_ERR(sk)) {
1472 kfree_skb(skb);
1473 return PTR_ERR(sk);
1474 }
1475 if (netlink_is_kernel(sk))
1476 return netlink_unicast_kernel(sk, skb, ssk);
1477
1478 if (sk_filter(sk, skb)) {
1479 err = skb->len;
1480 kfree_skb(skb);
1481 sock_put(sk);
1482 return err;
1483 }
1484
1485 err = netlink_attachskb(sk, skb, &timeo, ssk);
1486 if (err == 1)
1487 goto retry;
1488 if (err)
1489 return err;
1490
1491 return netlink_sendskb(sk, skb);
1492}
1493EXPORT_SYMBOL(netlink_unicast);
1494
1495int netlink_has_listeners(struct sock *sk, unsigned int group)
1496{
1497 int res = 0;
1498 struct listeners *listeners;
1499
1500 BUG_ON(!netlink_is_kernel(sk));
1501
1502 rcu_read_lock();
1503 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1504
1505 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1506 res = test_bit(group - 1, listeners->masks);
1507
1508 rcu_read_unlock();
1509
1510 return res;
1511}
1512EXPORT_SYMBOL_GPL(netlink_has_listeners);
1513
1514static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1515{
1516 struct netlink_sock *nlk = nlk_sk(sk);
1517
1518 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1519 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
1520 netlink_skb_set_owner_r(skb, sk);
1521 __netlink_sendskb(sk, skb);
1522 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1523 }
1524 return -1;
1525}
1526
1527struct netlink_broadcast_data {
1528 struct sock *exclude_sk;
1529 struct net *net;
1530 u32 portid;
1da177e4
LT
1531 u32 group;
1532 int failure;
1533 int delivery_failure;
1534 int congested;
1535 int delivered;
1536 gfp_t allocation;
1537 struct sk_buff *skb, *skb2;
1538 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1539 void *tx_data;
1540};
1541
1542static int do_one_broadcast(struct sock *sk,
1543 struct netlink_broadcast_data *p)
1544{
1545 struct netlink_sock *nlk = nlk_sk(sk);
1546 int val;
1547
1548 if (p->exclude_sk == sk)
1549 goto out;
1550
1551 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1552 !test_bit(p->group - 1, nlk->groups))
1553 goto out;
1554
1555 if (!net_eq(sock_net(sk), p->net))
1556 goto out;
1557
1558 if (p->failure) {
1559 netlink_overrun(sk);
1560 goto out;
1561 }
1562
1563 sock_hold(sk);
1564 if (p->skb2 == NULL) {
1565 if (skb_shared(p->skb)) {
1566 p->skb2 = skb_clone(p->skb, p->allocation);
1567 } else {
1568 p->skb2 = skb_get(p->skb);
1569 /*
1570 * skb ownership may have been set when
1571 * delivered to a previous socket.
1572 */
1573 skb_orphan(p->skb2);
1574 }
1575 }
1576 if (p->skb2 == NULL) {
1577 netlink_overrun(sk);
1578 /* Clone failed. Notify ALL listeners. */
1579 p->failure = 1;
1580 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1581 p->delivery_failure = 1;
1582 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1583 kfree_skb(p->skb2);
1584 p->skb2 = NULL;
1585 } else if (sk_filter(sk, p->skb2)) {
1586 kfree_skb(p->skb2);
1587 p->skb2 = NULL;
1588 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1589 netlink_overrun(sk);
1590 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1591 p->delivery_failure = 1;
1592 } else {
1593 p->congested |= val;
1594 p->delivered = 1;
1595 p->skb2 = NULL;
1596 }
1597 sock_put(sk);
1598
1599out:
1600 return 0;
1601}
1602
1603int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1604 u32 group, gfp_t allocation,
1605 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1606 void *filter_data)
1da177e4 1607{
3b1e0a65 1608 struct net *net = sock_net(ssk);
1da177e4 1609 struct netlink_broadcast_data info;
1da177e4
LT
1610 struct sock *sk;
1611
1612 skb = netlink_trim(skb, allocation);
1613
1614 info.exclude_sk = ssk;
1615 info.net = net;
1616 info.portid = portid;
1617 info.group = group;
1618 info.failure = 0;
1619 info.delivery_failure = 0;
1620 info.congested = 0;
1621 info.delivered = 0;
1622 info.allocation = allocation;
1623 info.skb = skb;
1624 info.skb2 = NULL;
1625 info.tx_filter = filter;
1626 info.tx_data = filter_data;
1627
1628 /* While we sleep in clone, do not allow to change socket list */
1629
1630 netlink_lock_table();
1631
1632 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1633 do_one_broadcast(sk, &info);
1634
1635 consume_skb(skb);
1636
1637 netlink_unlock_table();
1638
1639 if (info.delivery_failure) {
1640 kfree_skb(info.skb2);
1641 return -ENOBUFS;
1642 }
1643 consume_skb(info.skb2);
1644
1645 if (info.delivered) {
1646 if (info.congested && (allocation & __GFP_WAIT))
1647 yield();
1648 return 0;
1649 }
1650 return -ESRCH;
1651}
1652EXPORT_SYMBOL(netlink_broadcast_filtered);
1653
1654int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1655 u32 group, gfp_t allocation)
1656{
1657 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1658 NULL, NULL);
1659}
1660EXPORT_SYMBOL(netlink_broadcast);
1661
1662struct netlink_set_err_data {
1663 struct sock *exclude_sk;
1664 u32 portid;
1665 u32 group;
1666 int code;
1667};
1668
1669static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1670{
1671 struct netlink_sock *nlk = nlk_sk(sk);
1672 int ret = 0;
1673
1674 if (sk == p->exclude_sk)
1675 goto out;
1676
1677 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1678 goto out;
1679
1680 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1681 !test_bit(p->group - 1, nlk->groups))
1682 goto out;
1683
1684 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1685 ret = 1;
1686 goto out;
1687 }
1688
1689 sk->sk_err = p->code;
1690 sk->sk_error_report(sk);
1691out:
1692 return ret;
1693}
1694
1695/**
1696 * netlink_set_err - report error to broadcast listeners
1697 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1698 * @portid: the PORTID of a process that we want to skip (if any)
1699 * @groups: the broadcast group that will notice the error
1700 * @code: error code, must be negative (as usual in kernelspace)
1701 *
1702 * This function returns the number of broadcast listeners that have set the
1703 * NETLINK_RECV_NO_ENOBUFS socket option.
1704 */
1705int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1706{
1707 struct netlink_set_err_data info;
1708 struct sock *sk;
1709 int ret = 0;
1710
1711 info.exclude_sk = ssk;
1712 info.portid = portid;
1713 info.group = group;
1714 /* sk->sk_err wants a positive error value */
1715 info.code = -code;
1716
1717 read_lock(&nl_table_lock);
1718
1719 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1720 ret += do_one_set_err(sk, &info);
1721
1722 read_unlock(&nl_table_lock);
1723 return ret;
1724}
1725EXPORT_SYMBOL(netlink_set_err);
1726
1727/* must be called with netlink table grabbed */
1728static void netlink_update_socket_mc(struct netlink_sock *nlk,
1729 unsigned int group,
1730 int is_new)
1731{
1732 int old, new = !!is_new, subscriptions;
1733
1734 old = test_bit(group - 1, nlk->groups);
1735 subscriptions = nlk->subscriptions - old + new;
1736 if (new)
1737 __set_bit(group - 1, nlk->groups);
1738 else
1739 __clear_bit(group - 1, nlk->groups);
1740 netlink_update_subscriptions(&nlk->sk, subscriptions);
1741 netlink_update_listeners(&nlk->sk);
1742}
1743
1744static int netlink_setsockopt(struct socket *sock, int level, int optname,
1745 char __user *optval, unsigned int optlen)
1746{
1747 struct sock *sk = sock->sk;
1748 struct netlink_sock *nlk = nlk_sk(sk);
1749 unsigned int val = 0;
1750 int err;
1751
1752 if (level != SOL_NETLINK)
1753 return -ENOPROTOOPT;
1754
1755 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
1756 optlen >= sizeof(int) &&
1757 get_user(val, (unsigned int __user *)optval))
9a4595bc
PM
1758 return -EFAULT;
1759
1760 switch (optname) {
1761 case NETLINK_PKTINFO:
1762 if (val)
1763 nlk->flags |= NETLINK_RECV_PKTINFO;
1764 else
1765 nlk->flags &= ~NETLINK_RECV_PKTINFO;
1766 err = 0;
1767 break;
1768 case NETLINK_ADD_MEMBERSHIP:
1769 case NETLINK_DROP_MEMBERSHIP: {
1770 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
1771 return -EPERM;
1772 err = netlink_realloc_groups(sk);
1773 if (err)
1774 return err;
1775 if (!val || val - 1 >= nlk->ngroups)
1776 return -EINVAL;
1777 netlink_table_grab();
1778 netlink_update_socket_mc(nlk, val,
1779 optname == NETLINK_ADD_MEMBERSHIP);
1780 netlink_table_ungrab();
1781
1782 if (nlk->netlink_bind)
1783 nlk->netlink_bind(val);
1784
1785 err = 0;
1786 break;
1787 }
1788 case NETLINK_BROADCAST_ERROR:
1789 if (val)
1790 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
1791 else
1792 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
1793 err = 0;
1794 break;
1795 case NETLINK_NO_ENOBUFS:
1796 if (val) {
1797 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
1798 clear_bit(NETLINK_CONGESTED, &nlk->state);
1799 wake_up_interruptible(&nlk->wait);
1800 } else {
1801 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
1802 }
1803 err = 0;
1804 break;
1805#ifdef CONFIG_NETLINK_MMAP
1806 case NETLINK_RX_RING:
1807 case NETLINK_TX_RING: {
1808 struct nl_mmap_req req;
1809
1810 /* Rings might consume more memory than queue limits, require
1811 * CAP_NET_ADMIN.
1812 */
1813 if (!capable(CAP_NET_ADMIN))
1814 return -EPERM;
1815 if (optlen < sizeof(req))
1816 return -EINVAL;
1817 if (copy_from_user(&req, optval, sizeof(req)))
1818 return -EFAULT;
1819 err = netlink_set_ring(sk, &req, false,
1820 optname == NETLINK_TX_RING);
1821 break;
1822 }
1823#endif /* CONFIG_NETLINK_MMAP */
1824 default:
1825 err = -ENOPROTOOPT;
1826 }
1827 return err;
1828}
1829
1830static int netlink_getsockopt(struct socket *sock, int level, int optname,
1831 char __user *optval, int __user *optlen)
1832{
1833 struct sock *sk = sock->sk;
1834 struct netlink_sock *nlk = nlk_sk(sk);
1835 int len, val, err;
1836
1837 if (level != SOL_NETLINK)
1838 return -ENOPROTOOPT;
1839
1840 if (get_user(len, optlen))
1841 return -EFAULT;
1842 if (len < 0)
1843 return -EINVAL;
1844
1845 switch (optname) {
1846 case NETLINK_PKTINFO:
1847 if (len < sizeof(int))
1848 return -EINVAL;
1849 len = sizeof(int);
1850 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1851 if (put_user(len, optlen) ||
1852 put_user(val, optval))
1853 return -EFAULT;
1854 err = 0;
1855 break;
1856 case NETLINK_BROADCAST_ERROR:
1857 if (len < sizeof(int))
1858 return -EINVAL;
1859 len = sizeof(int);
1860 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
1861 if (put_user(len, optlen) ||
1862 put_user(val, optval))
1863 return -EFAULT;
1864 err = 0;
1865 break;
1866 case NETLINK_NO_ENOBUFS:
1867 if (len < sizeof(int))
1868 return -EINVAL;
1869 len = sizeof(int);
1870 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
1871 if (put_user(len, optlen) ||
1872 put_user(val, optval))
1873 return -EFAULT;
1874 err = 0;
1875 break;
1876 default:
1877 err = -ENOPROTOOPT;
1878 }
1879 return err;
1880}
1881
1882static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1883{
1884 struct nl_pktinfo info;
1885
1886 info.group = NETLINK_CB(skb).dst_group;
1887 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1888}
1889
1890static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1891 struct msghdr *msg, size_t len)
1892{
1893 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1894 struct sock *sk = sock->sk;
1895 struct netlink_sock *nlk = nlk_sk(sk);
1896 struct sockaddr_nl *addr = msg->msg_name;
1897 u32 dst_portid;
1898 u32 dst_group;
1899 struct sk_buff *skb;
1900 int err;
1901 struct scm_cookie scm;
1902
1903 if (msg->msg_flags&MSG_OOB)
1904 return -EOPNOTSUPP;
1905
1906 if (NULL == siocb->scm)
1907 siocb->scm = &scm;
1908
1909 err = scm_send(sock, msg, siocb->scm, true);
1910 if (err < 0)
1911 return err;
1912
1913 if (msg->msg_namelen) {
1914 err = -EINVAL;
1915 if (addr->nl_family != AF_NETLINK)
1916 goto out;
1917 dst_portid = addr->nl_pid;
1918 dst_group = ffs(addr->nl_groups);
1919 err = -EPERM;
1920 if ((dst_group || dst_portid) &&
1921 !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
1922 goto out;
1923 } else {
1924 dst_portid = nlk->dst_portid;
1925 dst_group = nlk->dst_group;
1926 }
1927
1928 if (!nlk->portid) {
1929 err = netlink_autobind(sock);
1930 if (err)
1931 goto out;
1932 }
1933
5fd96123
PM
1934 if (netlink_tx_is_mmaped(sk) &&
1935 msg->msg_iov->iov_base == NULL) {
1936 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
1937 siocb);
1938 goto out;
1939 }
1940
1da177e4
LT
1941 err = -EMSGSIZE;
1942 if (len > sk->sk_sndbuf - 32)
1943 goto out;
1944 err = -ENOBUFS;
339bf98f 1945 skb = alloc_skb(len, GFP_KERNEL);
6ac552fd 1946 if (skb == NULL)
1da177e4
LT
1947 goto out;
1948
15e47304 1949 NETLINK_CB(skb).portid = nlk->portid;
d629b836 1950 NETLINK_CB(skb).dst_group = dst_group;
dbe9a417 1951 NETLINK_CB(skb).creds = siocb->scm->creds;
1da177e4 1952
1da177e4 1953 err = -EFAULT;
6ac552fd 1954 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1da177e4
LT
1955 kfree_skb(skb);
1956 goto out;
1957 }
1958
1959 err = security_netlink_send(sk, skb);
1960 if (err) {
1961 kfree_skb(skb);
1962 goto out;
1963 }
1964
d629b836 1965 if (dst_group) {
1da177e4 1966 atomic_inc(&skb->users);
15e47304 1967 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1da177e4 1968 }
15e47304 1969 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1da177e4
LT
1970
1971out:
b47030c7 1972 scm_destroy(siocb->scm);
1da177e4
LT
1973 return err;
1974}
1975
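/*
 * Editor's illustrative sketch (userspace, not part of af_netlink.c): the
 * kind of sendmsg() call serviced by the function above.  An all-zero nl_pid
 * in the destination means "the kernel"; NLMSG_DONE is used only because it
 * is a control type that handlers built on netlink_rcv_skb() skip, so the
 * request is harmless.
 */
#if 0	/* userspace example, guarded out of the build */
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/netlink.h>

static ssize_t send_to_kernel(int fd)
{
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };	/* nl_pid = 0, nl_groups = 0 */
	struct nlmsghdr nlh = {
		.nlmsg_len	= NLMSG_LENGTH(0),		/* header only, no payload */
		.nlmsg_type	= NLMSG_DONE,
		.nlmsg_flags	= NLM_F_REQUEST,
	};
	struct iovec iov = { .iov_base = &nlh, .iov_len = nlh.nlmsg_len };
	struct msghdr msg = {
		.msg_name	= &dst,
		.msg_namelen	= sizeof(dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};

	return sendmsg(fd, &msg, 0);
}
#endif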
1976static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1977 struct msghdr *msg, size_t len,
1978 int flags)
1979{
1980 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1981 struct scm_cookie scm;
1982 struct sock *sk = sock->sk;
1983 struct netlink_sock *nlk = nlk_sk(sk);
1984 int noblock = flags&MSG_DONTWAIT;
1985 size_t copied;
68d6ac6d 1986 struct sk_buff *skb, *data_skb;
b44d211e 1987 int err, ret;
1da177e4
LT
1988
1989 if (flags&MSG_OOB)
1990 return -EOPNOTSUPP;
1991
1992 copied = 0;
1993
6ac552fd
PM
1994 skb = skb_recv_datagram(sk, flags, noblock, &err);
1995 if (skb == NULL)
1da177e4
LT
1996 goto out;
1997
68d6ac6d
JB
1998 data_skb = skb;
1999
1dacc76d
JB
2000#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2001 if (unlikely(skb_shinfo(skb)->frag_list)) {
1dacc76d 2002 /*
68d6ac6d
JB
2003 * If this skb has a frag_list, it means that we have to use the
2004 * frag_list skb's data for compat tasks and the regular skb's
2005 * data for normal (non-compat) tasks.
1dacc76d 2006 *
68d6ac6d
JB
2007 * If we need to send the compat skb, assign it to the
2008 * 'data_skb' variable so that it will be used below for data
2009 * copying. We keep 'skb' for everything else, including
2010 * freeing both later.
1dacc76d 2011 */
68d6ac6d
JB
2012 if (flags & MSG_CMSG_COMPAT)
2013 data_skb = skb_shinfo(skb)->frag_list;
1dacc76d
JB
2014 }
2015#endif
2016
1da177e4
LT
2017 msg->msg_namelen = 0;
2018
68d6ac6d 2019 copied = data_skb->len;
1da177e4
LT
2020 if (len < copied) {
2021 msg->msg_flags |= MSG_TRUNC;
2022 copied = len;
2023 }
2024
68d6ac6d
JB
2025 skb_reset_transport_header(data_skb);
2026 err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
1da177e4
LT
2027
2028 if (msg->msg_name) {
6ac552fd 2029 struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
1da177e4
LT
2030 addr->nl_family = AF_NETLINK;
2031 addr->nl_pad = 0;
15e47304 2032 addr->nl_pid = NETLINK_CB(skb).portid;
d629b836 2033 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1da177e4
LT
2034 msg->msg_namelen = sizeof(*addr);
2035 }
2036
cc9a06cd
PM
2037 if (nlk->flags & NETLINK_RECV_PKTINFO)
2038 netlink_cmsg_recv_pktinfo(msg, skb);
2039
1da177e4
LT
2040 if (siocb->scm == NULL) {
2041 memset(&scm, 0, sizeof(scm));
2042 siocb->scm = &scm;
2043 }
2044 siocb->scm->creds = *NETLINK_CREDS(skb);
188ccb55 2045 if (flags & MSG_TRUNC)
68d6ac6d 2046 copied = data_skb->len;
daa3766e 2047
1da177e4
LT
2048 skb_free_datagram(sk, skb);
2049
b44d211e
AV
2050 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2051 ret = netlink_dump(sk);
2052 if (ret) {
2053 sk->sk_err = ret;
2054 sk->sk_error_report(sk);
2055 }
2056 }
1da177e4
LT
2057
2058 scm_recv(sock, msg, siocb->scm, flags);
1da177e4
LT
2059out:
2060 netlink_rcv_wake(sk);
2061 return err ? : copied;
2062}
2063
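/*
 * Editor's illustrative sketch (userspace, not part of af_netlink.c): since
 * the function above reports the untruncated length when MSG_TRUNC is set,
 * a caller can probe the size of the next queued message without consuming
 * it by combining MSG_PEEK and MSG_TRUNC.
 */
#if 0	/* userspace example, guarded out of the build */
#include <sys/socket.h>

static ssize_t next_msg_size(int fd)
{
	char byte;

	return recv(fd, &byte, sizeof(byte), MSG_PEEK | MSG_TRUNC);
}
#endif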
2064static void netlink_data_ready(struct sock *sk, int len)
2065{
cd40b7d3 2066 BUG();
1da177e4
LT
2067}
2068
2069/*
746fac4d 2070 * We export these functions to other modules. They provide a
1da177e4
LT
2071 * complete set of kernel non-blocking support for message
2072 * queueing.
2073 */
2074
2075struct sock *
9f00d977
PNA
2076__netlink_kernel_create(struct net *net, int unit, struct module *module,
2077 struct netlink_kernel_cfg *cfg)
1da177e4
LT
2078{
2079 struct socket *sock;
2080 struct sock *sk;
77247bbb 2081 struct netlink_sock *nlk;
5c398dc8 2082 struct listeners *listeners = NULL;
a31f2d17
PNA
2083 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2084 unsigned int groups;
1da177e4 2085
fab2caf6 2086 BUG_ON(!nl_table);
1da177e4 2087
6ac552fd 2088 if (unit < 0 || unit >= MAX_LINKS)
1da177e4
LT
2089 return NULL;
2090
2091 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2092 return NULL;
2093
23fe1866
PE
2094 /*
2095 * We only need to hold a reference on the net from sk, but must not
2096 * get_net() it. Besides, we cannot get and then put the net here.
2097 * So we create the socket inside init_net and then move it to net.
2098 */
2099
2100 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2101 goto out_sock_release_nosk;
2102
2103 sk = sock->sk;
edf02087 2104 sk_change_net(sk, net);
4fdb3bb7 2105
a31f2d17 2106 if (!cfg || cfg->groups < 32)
4277a083 2107 groups = 32;
a31f2d17
PNA
2108 else
2109 groups = cfg->groups;
4277a083 2110
5c398dc8 2111 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
4277a083
PM
2112 if (!listeners)
2113 goto out_sock_release;
2114
1da177e4 2115 sk->sk_data_ready = netlink_data_ready;
a31f2d17
PNA
2116 if (cfg && cfg->input)
2117 nlk_sk(sk)->netlink_rcv = cfg->input;
1da177e4 2118
b4b51029 2119 if (netlink_insert(sk, net, 0))
77247bbb 2120 goto out_sock_release;
4fdb3bb7 2121
77247bbb
PM
2122 nlk = nlk_sk(sk);
2123 nlk->flags |= NETLINK_KERNEL_SOCKET;
4fdb3bb7 2124
4fdb3bb7 2125 netlink_table_grab();
b4b51029
EB
2126 if (!nl_table[unit].registered) {
2127 nl_table[unit].groups = groups;
5c398dc8 2128 rcu_assign_pointer(nl_table[unit].listeners, listeners);
b4b51029
EB
2129 nl_table[unit].cb_mutex = cb_mutex;
2130 nl_table[unit].module = module;
9785e10a
PNA
2131 if (cfg) {
2132 nl_table[unit].bind = cfg->bind;
2133 nl_table[unit].flags = cfg->flags;
2134 }
b4b51029 2135 nl_table[unit].registered = 1;
f937f1f4
JJ
2136 } else {
2137 kfree(listeners);
869e58f8 2138 nl_table[unit].registered++;
b4b51029 2139 }
4fdb3bb7 2140 netlink_table_ungrab();
77247bbb
PM
2141 return sk;
2142
4fdb3bb7 2143out_sock_release:
4277a083 2144 kfree(listeners);
9dfbec1f 2145 netlink_kernel_release(sk);
23fe1866
PE
2146 return NULL;
2147
2148out_sock_release_nosk:
4fdb3bb7 2149 sock_release(sock);
77247bbb 2150 return NULL;
1da177e4 2151}
9f00d977 2152EXPORT_SYMBOL(__netlink_kernel_create);
b7c6ba6e
DL
2153
2154void
2155netlink_kernel_release(struct sock *sk)
2156{
edf02087 2157 sk_release_kernel(sk);
b7c6ba6e
DL
2158}
2159EXPORT_SYMBOL(netlink_kernel_release);
2160
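/*
 * Editor's illustrative sketch (kernel side, not part of af_netlink.c): how a
 * subsystem typically uses the factory above, via the netlink_kernel_create()
 * wrapper from <linux/netlink.h> which supplies THIS_MODULE.  The protocol
 * number, names and handler below are placeholders invented for the example.
 */
#if 0	/* example module usage, guarded out of the build */
#include <linux/kernel.h>
#include <net/netlink.h>
#include <net/net_namespace.h>

#define EXAMPLE_NETLINK_PROTO	31	/* placeholder, must be < MAX_LINKS and unused */

static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	pr_info("example: type %u from portid %u\n",
		nlh->nlmsg_type, NETLINK_CB(skb).portid);
	return 0;
}

static void example_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}

static struct sock *example_create(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= example_rcv,
	};

	return netlink_kernel_create(&init_net, EXAMPLE_NETLINK_PROTO, &cfg);
}
#endif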
d136f1bd 2161int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
b4ff4f04 2162{
5c398dc8 2163 struct listeners *new, *old;
b4ff4f04 2164 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
b4ff4f04
JB
2165
2166 if (groups < 32)
2167 groups = 32;
2168
b4ff4f04 2169 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
5c398dc8
ED
2170 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2171 if (!new)
d136f1bd 2172 return -ENOMEM;
6d772ac5 2173 old = nl_deref_protected(tbl->listeners);
5c398dc8
ED
2174 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2175 rcu_assign_pointer(tbl->listeners, new);
2176
37b6b935 2177 kfree_rcu(old, rcu);
b4ff4f04
JB
2178 }
2179 tbl->groups = groups;
2180
d136f1bd
JB
2181 return 0;
2182}
2183
2184/**
2185 * netlink_change_ngroups - change number of multicast groups
2186 *
2187 * This changes the number of multicast groups that are available
2188 * on a certain netlink family. Note that it is not possible to
2189 * change the number of groups to below 32. Also note that it does
2190 * not implicitly call netlink_clear_multicast_users() when the
2191 * number of groups is reduced.
2192 *
2193 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2194 * @groups: The new number of groups.
2195 */
2196int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2197{
2198 int err;
2199
2200 netlink_table_grab();
2201 err = __netlink_change_ngroups(sk, groups);
b4ff4f04 2202 netlink_table_ungrab();
d136f1bd 2203
b4ff4f04
JB
2204 return err;
2205}
b4ff4f04 2206
b8273570
JB
2207void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2208{
2209 struct sock *sk;
b8273570
JB
2210 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2211
b67bfe0d 2212 sk_for_each_bound(sk, &tbl->mc_list)
b8273570
JB
2213 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2214}
2215
84659eb5
JB
2216/**
2217 * netlink_clear_multicast_users - kick all listeners off a multicast group
2218 *
2219 * This function removes all listeners from the given group.
2220 * @ksk: The kernel netlink socket, as returned by
2221 * netlink_kernel_create().
2222 * @group: The multicast group to clear.
2223 */
2224void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2225{
84659eb5 2226 netlink_table_grab();
b8273570 2227 __netlink_clear_multicast_users(ksk, group);
84659eb5
JB
2228 netlink_table_ungrab();
2229}
84659eb5 2230
a46621a3 2231struct nlmsghdr *
15e47304 2232__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
a46621a3
DV
2233{
2234 struct nlmsghdr *nlh;
573ce260 2235 int size = nlmsg_msg_size(len);
a46621a3
DV
2236
2237 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2238 nlh->nlmsg_type = type;
2239 nlh->nlmsg_len = size;
2240 nlh->nlmsg_flags = flags;
15e47304 2241 nlh->nlmsg_pid = portid;
a46621a3
DV
2242 nlh->nlmsg_seq = seq;
2243 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
573ce260 2244 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
a46621a3
DV
2245 return nlh;
2246}
2247EXPORT_SYMBOL(__nlmsg_put);
2248
1da177e4
LT
2249/*
2250 * It looks a bit ugly.
2251 * It would be better to create a kernel thread.
2252 */
2253
2254static int netlink_dump(struct sock *sk)
2255{
2256 struct netlink_sock *nlk = nlk_sk(sk);
2257 struct netlink_callback *cb;
c7ac8679 2258 struct sk_buff *skb = NULL;
1da177e4 2259 struct nlmsghdr *nlh;
bf8b79e4 2260 int len, err = -ENOBUFS;
c7ac8679 2261 int alloc_size;
1da177e4 2262
af65bdfc 2263 mutex_lock(nlk->cb_mutex);
1da177e4
LT
2264
2265 cb = nlk->cb;
2266 if (cb == NULL) {
bf8b79e4
TG
2267 err = -EINVAL;
2268 goto errout_skb;
1da177e4
LT
2269 }
2270
c7ac8679
GR
2271 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2272
2273 skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
2274 if (!skb)
c63d6ea3 2275 goto errout_skb;
c7ac8679 2276
1da177e4
LT
2277 len = cb->dump(skb, cb);
2278
2279 if (len > 0) {
af65bdfc 2280 mutex_unlock(nlk->cb_mutex);
b1153f29
SH
2281
2282 if (sk_filter(sk, skb))
2283 kfree_skb(skb);
4a7e7c2a
ED
2284 else
2285 __netlink_sendskb(sk, skb);
1da177e4
LT
2286 return 0;
2287 }
2288
bf8b79e4
TG
2289 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2290 if (!nlh)
2291 goto errout_skb;
2292
670dc283
JB
2293 nl_dump_check_consistent(cb, nlh);
2294
bf8b79e4
TG
2295 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2296
b1153f29
SH
2297 if (sk_filter(sk, skb))
2298 kfree_skb(skb);
4a7e7c2a
ED
2299 else
2300 __netlink_sendskb(sk, skb);
1da177e4 2301
a8f74b22
TG
2302 if (cb->done)
2303 cb->done(cb);
1da177e4 2304 nlk->cb = NULL;
af65bdfc 2305 mutex_unlock(nlk->cb_mutex);
1da177e4 2306
6dc878a8 2307 module_put(cb->module);
bfb253c9 2308 netlink_consume_callback(cb);
1da177e4 2309 return 0;
1797754e 2310
bf8b79e4 2311errout_skb:
af65bdfc 2312 mutex_unlock(nlk->cb_mutex);
bf8b79e4 2313 kfree_skb(skb);
bf8b79e4 2314 return err;
1da177e4
LT
2315}
2316
6dc878a8
G
2317int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2318 const struct nlmsghdr *nlh,
2319 struct netlink_dump_control *control)
1da177e4
LT
2320{
2321 struct netlink_callback *cb;
2322 struct sock *sk;
2323 struct netlink_sock *nlk;
b44d211e 2324 int ret;
1da177e4 2325
0da974f4 2326 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1da177e4
LT
2327 if (cb == NULL)
2328 return -ENOBUFS;
2329
80d326fa
PNA
2330 cb->dump = control->dump;
2331 cb->done = control->done;
1da177e4 2332 cb->nlh = nlh;
7175c883 2333 cb->data = control->data;
6dc878a8 2334 cb->module = control->module;
80d326fa 2335 cb->min_dump_alloc = control->min_dump_alloc;
1da177e4
LT
2336 atomic_inc(&skb->users);
2337 cb->skb = skb;
2338
15e47304 2339 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1da177e4
LT
2340 if (sk == NULL) {
2341 netlink_destroy_callback(cb);
2342 return -ECONNREFUSED;
2343 }
2344 nlk = nlk_sk(sk);
6dc878a8 2345
af65bdfc 2346 mutex_lock(nlk->cb_mutex);
6dc878a8 2347 /* A dump is in progress... */
3f660d66 2348 if (nlk->cb) {
af65bdfc 2349 mutex_unlock(nlk->cb_mutex);
1da177e4 2350 netlink_destroy_callback(cb);
6dc878a8
G
2351 ret = -EBUSY;
2352 goto out;
1da177e4 2353 }
6dc878a8
G
2354 /* add reference of module which cb->dump belongs to */
2355 if (!try_module_get(cb->module)) {
2356 mutex_unlock(nlk->cb_mutex);
2357 netlink_destroy_callback(cb);
2358 ret = -EPROTONOSUPPORT;
2359 goto out;
2360 }
2361
1da177e4 2362 nlk->cb = cb;
af65bdfc 2363 mutex_unlock(nlk->cb_mutex);
1da177e4 2364
b44d211e 2365 ret = netlink_dump(sk);
6dc878a8 2366out:
1da177e4 2367 sock_put(sk);
5c58298c 2368
b44d211e
AV
2369 if (ret)
2370 return ret;
2371
5c58298c
DL
2372 /* We successfully started a dump, by returning -EINTR we
2373 * signal not to send ACK even if it was requested.
2374 */
2375 return -EINTR;
1da177e4 2376}
6dc878a8 2377EXPORT_SYMBOL(__netlink_dump_start);
1da177e4
LT
2378
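/*
 * Editor's illustrative sketch (kernel side, not part of af_netlink.c): a
 * request handler starting a dump through the netlink_dump_start() wrapper
 * from <linux/netlink.h>, which fills in .module and calls the function
 * above.  example_nlsk and example_dump_one() are placeholders.
 */
#if 0	/* example module usage, guarded out of the build */
#include <net/netlink.h>

static struct sock *example_nlsk;	/* kernel socket from netlink_kernel_create() */

static int example_dump_one(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* Fill skb with as many records as fit; return > 0 while more remain,
	 * 0 when done (netlink_dump() above then appends NLMSG_DONE).
	 */
	return 0;
}

static int example_handle_get(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = example_dump_one,
		};

		/* -EINTR from a successful start suppresses the automatic ACK. */
		return netlink_dump_start(example_nlsk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}
#endif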
2379void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2380{
2381 struct sk_buff *skb;
2382 struct nlmsghdr *rep;
2383 struct nlmsgerr *errmsg;
339bf98f 2384 size_t payload = sizeof(*errmsg);
1da177e4 2385
339bf98f
TG
2386 /* error messages get the original request appended */
2387 if (err)
2388 payload += nlmsg_len(nlh);
1da177e4 2389
339bf98f 2390 skb = nlmsg_new(payload, GFP_KERNEL);
1da177e4
LT
2391 if (!skb) {
2392 struct sock *sk;
2393
3b1e0a65 2394 sk = netlink_lookup(sock_net(in_skb->sk),
b4b51029 2395 in_skb->sk->sk_protocol,
15e47304 2396 NETLINK_CB(in_skb).portid);
1da177e4
LT
2397 if (sk) {
2398 sk->sk_err = ENOBUFS;
2399 sk->sk_error_report(sk);
2400 sock_put(sk);
2401 }
2402 return;
2403 }
2404
15e47304 2405 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
5dba93ae 2406 NLMSG_ERROR, payload, 0);
bf8b79e4 2407 errmsg = nlmsg_data(rep);
1da177e4 2408 errmsg->error = err;
bf8b79e4 2409 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
15e47304 2410 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1da177e4 2411}
6ac552fd 2412EXPORT_SYMBOL(netlink_ack);
1da177e4 2413
cd40b7d3 2414int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1d00a4eb 2415 struct nlmsghdr *))
82ace47a 2416{
82ace47a
TG
2417 struct nlmsghdr *nlh;
2418 int err;
2419
2420 while (skb->len >= nlmsg_total_size(0)) {
cd40b7d3
DL
2421 int msglen;
2422
b529ccf2 2423 nlh = nlmsg_hdr(skb);
d35b6856 2424 err = 0;
82ace47a 2425
ad8e4b75 2426 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
82ace47a
TG
2427 return 0;
2428
d35b6856
TG
2429 /* Only requests are handled by the kernel */
2430 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
5c58298c 2431 goto ack;
45e7ae7f
TG
2432
2433 /* Skip control messages */
2434 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
5c58298c 2435 goto ack;
d35b6856 2436
1d00a4eb 2437 err = cb(skb, nlh);
5c58298c
DL
2438 if (err == -EINTR)
2439 goto skip;
2440
2441ack:
d35b6856 2442 if (nlh->nlmsg_flags & NLM_F_ACK || err)
82ace47a 2443 netlink_ack(skb, nlh, err);
82ace47a 2444
5c58298c 2445skip:
6ac552fd 2446 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
cd40b7d3
DL
2447 if (msglen > skb->len)
2448 msglen = skb->len;
2449 skb_pull(skb, msglen);
82ace47a
TG
2450 }
2451
2452 return 0;
2453}
6ac552fd 2454EXPORT_SYMBOL(netlink_rcv_skb);
82ace47a 2455
d387f6ad
TG
2456/**
2457 * nlmsg_notify - send a notification netlink message
2458 * @sk: netlink socket to use
2459 * @skb: notification message
15e47304 2460 * @portid: destination netlink portid for reports or 0
d387f6ad
TG
2461 * @group: destination multicast group or 0
2462 * @report: 1 to report back, 0 to disable
2463 * @flags: allocation flags
2464 */
15e47304 2465int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
d387f6ad
TG
2466 unsigned int group, int report, gfp_t flags)
2467{
2468 int err = 0;
2469
2470 if (group) {
15e47304 2471 int exclude_portid = 0;
d387f6ad
TG
2472
2473 if (report) {
2474 atomic_inc(&skb->users);
15e47304 2475 exclude_portid = portid;
d387f6ad
TG
2476 }
2477
1ce85fe4
PNA
2478 /* errors are reported via the destination sk->sk_err, but delivery
2479 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
15e47304 2480 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
d387f6ad
TG
2481 }
2482
1ce85fe4
PNA
2483 if (report) {
2484 int err2;
2485
15e47304 2486 err2 = nlmsg_unicast(sk, skb, portid);
1ce85fe4
PNA
2487 if (!err || err == -ESRCH)
2488 err = err2;
2489 }
d387f6ad
TG
2490
2491 return err;
2492}
6ac552fd 2493EXPORT_SYMBOL(nlmsg_notify);
d387f6ad 2494
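/*
 * Editor's illustrative sketch (kernel side, not part of af_netlink.c): a
 * typical caller of nlmsg_notify() above.  The message type, the multicast
 * group number and example_nlsk are placeholders invented for the example.
 */
#if 0	/* example module usage, guarded out of the build */
#include <net/netlink.h>

static int example_notify(struct sock *example_nlsk, u32 portid, int report)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(0, GFP_KERNEL);		/* room for a header-only message */
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, portid, 0, NLMSG_MIN_TYPE, 0, 0);
	if (!nlh) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}
	nlmsg_end(skb, nlh);

	/* Multicast to group 1; also unicast back to the requester if asked. */
	return nlmsg_notify(example_nlsk, skb, portid, 1, report, GFP_KERNEL);
}
#endif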
1da177e4
LT
2495#ifdef CONFIG_PROC_FS
2496struct nl_seq_iter {
e372c414 2497 struct seq_net_private p;
1da177e4
LT
2498 int link;
2499 int hash_idx;
2500};
2501
2502static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2503{
2504 struct nl_seq_iter *iter = seq->private;
2505 int i, j;
2506 struct sock *s;
1da177e4
LT
2507 loff_t off = 0;
2508
6ac552fd 2509 for (i = 0; i < MAX_LINKS; i++) {
15e47304 2510 struct nl_portid_hash *hash = &nl_table[i].hash;
1da177e4
LT
2511
2512 for (j = 0; j <= hash->mask; j++) {
b67bfe0d 2513 sk_for_each(s, &hash->table[j]) {
1218854a 2514 if (sock_net(s) != seq_file_net(seq))
b4b51029 2515 continue;
1da177e4
LT
2516 if (off == pos) {
2517 iter->link = i;
2518 iter->hash_idx = j;
2519 return s;
2520 }
2521 ++off;
2522 }
2523 }
2524 }
2525 return NULL;
2526}
2527
2528static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
9a429c49 2529 __acquires(nl_table_lock)
1da177e4
LT
2530{
2531 read_lock(&nl_table_lock);
2532 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2533}
2534
2535static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2536{
2537 struct sock *s;
2538 struct nl_seq_iter *iter;
2539 int i, j;
2540
2541 ++*pos;
2542
2543 if (v == SEQ_START_TOKEN)
2544 return netlink_seq_socket_idx(seq, 0);
746fac4d 2545
b4b51029
EB
2546 iter = seq->private;
2547 s = v;
2548 do {
2549 s = sk_next(s);
1218854a 2550 } while (s && sock_net(s) != seq_file_net(seq));
1da177e4
LT
2551 if (s)
2552 return s;
2553
1da177e4
LT
2554 i = iter->link;
2555 j = iter->hash_idx + 1;
2556
2557 do {
15e47304 2558 struct nl_portid_hash *hash = &nl_table[i].hash;
1da177e4
LT
2559
2560 for (; j <= hash->mask; j++) {
2561 s = sk_head(&hash->table[j]);
1218854a 2562 while (s && sock_net(s) != seq_file_net(seq))
b4b51029 2563 s = sk_next(s);
1da177e4
LT
2564 if (s) {
2565 iter->link = i;
2566 iter->hash_idx = j;
2567 return s;
2568 }
2569 }
2570
2571 j = 0;
2572 } while (++i < MAX_LINKS);
2573
2574 return NULL;
2575}
2576
2577static void netlink_seq_stop(struct seq_file *seq, void *v)
9a429c49 2578 __releases(nl_table_lock)
1da177e4
LT
2579{
2580 read_unlock(&nl_table_lock);
2581}
2582
2583
2584static int netlink_seq_show(struct seq_file *seq, void *v)
2585{
658cb354 2586 if (v == SEQ_START_TOKEN) {
1da177e4
LT
2587 seq_puts(seq,
2588 "sk Eth Pid Groups "
cf0aa4e0 2589 "Rmem Wmem Dump Locks Drops Inode\n");
658cb354 2590 } else {
1da177e4
LT
2591 struct sock *s = v;
2592 struct netlink_sock *nlk = nlk_sk(s);
2593
9f1e0ad0 2594 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
1da177e4
LT
2595 s,
2596 s->sk_protocol,
15e47304 2597 nlk->portid,
513c2500 2598 nlk->groups ? (u32)nlk->groups[0] : 0,
31e6d363
ED
2599 sk_rmem_alloc_get(s),
2600 sk_wmem_alloc_get(s),
1da177e4 2601 nlk->cb,
38938bfe 2602 atomic_read(&s->sk_refcnt),
cf0aa4e0
MY
2603 atomic_read(&s->sk_drops),
2604 sock_i_ino(s)
1da177e4
LT
2605 );
2606
2607 }
2608 return 0;
2609}
2610
56b3d975 2611static const struct seq_operations netlink_seq_ops = {
1da177e4
LT
2612 .start = netlink_seq_start,
2613 .next = netlink_seq_next,
2614 .stop = netlink_seq_stop,
2615 .show = netlink_seq_show,
2616};
2617
2618
2619static int netlink_seq_open(struct inode *inode, struct file *file)
2620{
e372c414
DL
2621 return seq_open_net(inode, file, &netlink_seq_ops,
2622 sizeof(struct nl_seq_iter));
b4b51029
EB
2623}
2624
da7071d7 2625static const struct file_operations netlink_seq_fops = {
1da177e4
LT
2626 .owner = THIS_MODULE,
2627 .open = netlink_seq_open,
2628 .read = seq_read,
2629 .llseek = seq_lseek,
e372c414 2630 .release = seq_release_net,
1da177e4
LT
2631};
2632
2633#endif
2634
2635int netlink_register_notifier(struct notifier_block *nb)
2636{
e041c683 2637 return atomic_notifier_chain_register(&netlink_chain, nb);
1da177e4 2638}
6ac552fd 2639EXPORT_SYMBOL(netlink_register_notifier);
1da177e4
LT
2640
2641int netlink_unregister_notifier(struct notifier_block *nb)
2642{
e041c683 2643 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1da177e4 2644}
6ac552fd 2645EXPORT_SYMBOL(netlink_unregister_notifier);
746fac4d 2646
90ddc4f0 2647static const struct proto_ops netlink_ops = {
1da177e4
LT
2648 .family = PF_NETLINK,
2649 .owner = THIS_MODULE,
2650 .release = netlink_release,
2651 .bind = netlink_bind,
2652 .connect = netlink_connect,
2653 .socketpair = sock_no_socketpair,
2654 .accept = sock_no_accept,
2655 .getname = netlink_getname,
9652e931 2656 .poll = netlink_poll,
1da177e4
LT
2657 .ioctl = sock_no_ioctl,
2658 .listen = sock_no_listen,
2659 .shutdown = sock_no_shutdown,
9a4595bc
PM
2660 .setsockopt = netlink_setsockopt,
2661 .getsockopt = netlink_getsockopt,
1da177e4
LT
2662 .sendmsg = netlink_sendmsg,
2663 .recvmsg = netlink_recvmsg,
ccdfcc39 2664 .mmap = netlink_mmap,
1da177e4
LT
2665 .sendpage = sock_no_sendpage,
2666};
2667
ec1b4cf7 2668static const struct net_proto_family netlink_family_ops = {
1da177e4
LT
2669 .family = PF_NETLINK,
2670 .create = netlink_create,
2671 .owner = THIS_MODULE, /* for consistency 8) */
2672};
2673
4665079c 2674static int __net_init netlink_net_init(struct net *net)
b4b51029
EB
2675{
2676#ifdef CONFIG_PROC_FS
d4beaa66 2677 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
b4b51029
EB
2678 return -ENOMEM;
2679#endif
2680 return 0;
2681}
2682
4665079c 2683static void __net_exit netlink_net_exit(struct net *net)
b4b51029
EB
2684{
2685#ifdef CONFIG_PROC_FS
ece31ffd 2686 remove_proc_entry("netlink", net->proc_net);
b4b51029
EB
2687#endif
2688}
2689
b963ea89
DM
2690static void __init netlink_add_usersock_entry(void)
2691{
5c398dc8 2692 struct listeners *listeners;
b963ea89
DM
2693 int groups = 32;
2694
5c398dc8 2695 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
b963ea89 2696 if (!listeners)
5c398dc8 2697 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
b963ea89
DM
2698
2699 netlink_table_grab();
2700
2701 nl_table[NETLINK_USERSOCK].groups = groups;
5c398dc8 2702 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
b963ea89
DM
2703 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2704 nl_table[NETLINK_USERSOCK].registered = 1;
9785e10a 2705 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
b963ea89
DM
2706
2707 netlink_table_ungrab();
2708}
2709
022cbae6 2710static struct pernet_operations __net_initdata netlink_net_ops = {
b4b51029
EB
2711 .init = netlink_net_init,
2712 .exit = netlink_net_exit,
2713};
2714
1da177e4
LT
2715static int __init netlink_proto_init(void)
2716{
1da177e4 2717 int i;
26ff5ddc 2718 unsigned long limit;
1da177e4
LT
2719 unsigned int order;
2720 int err = proto_register(&netlink_proto, 0);
2721
2722 if (err != 0)
2723 goto out;
2724
fab25745 2725 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
1da177e4 2726
0da974f4 2727 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
fab2caf6
AM
2728 if (!nl_table)
2729 goto panic;
1da177e4 2730
4481374c
JB
2731 if (totalram_pages >= (128 * 1024))
2732 limit = totalram_pages >> (21 - PAGE_SHIFT);
1da177e4 2733 else
4481374c 2734 limit = totalram_pages >> (23 - PAGE_SHIFT);
1da177e4 2735
26ff5ddc
DC
2736 order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
2737 limit = (1UL << order) / sizeof(struct hlist_head);
2738 order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
1da177e4
LT
2739
2740 for (i = 0; i < MAX_LINKS; i++) {
15e47304 2741 struct nl_portid_hash *hash = &nl_table[i].hash;
1da177e4 2742
15e47304 2743 hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
1da177e4
LT
2744 if (!hash->table) {
2745 while (i-- > 0)
15e47304 2746 nl_portid_hash_free(nl_table[i].hash.table,
1da177e4
LT
2747 1 * sizeof(*hash->table));
2748 kfree(nl_table);
fab2caf6 2749 goto panic;
1da177e4 2750 }
1da177e4
LT
2751 hash->max_shift = order;
2752 hash->shift = 0;
2753 hash->mask = 0;
2754 hash->rehash_time = jiffies;
2755 }
2756
b963ea89
DM
2757 netlink_add_usersock_entry();
2758
1da177e4 2759 sock_register(&netlink_family_ops);
b4b51029 2760 register_pernet_subsys(&netlink_net_ops);
746fac4d 2761 /* The netlink device handler may be needed early. */
1da177e4
LT
2762 rtnetlink_init();
2763out:
2764 return err;
fab2caf6
AM
2765panic:
2766 panic("netlink_init: Cannot allocate nl_table\n");
1da177e4
LT
2767}
2768
1da177e4 2769core_initcall(netlink_proto_init);