netlink: Avoid netlink mmap alloc if msg size exceeds frame size
net/netlink/genetlink.c
1 /*
2 * NETLINK Generic Netlink Family
3 *
4 * Authors: Jamal Hadi Salim
5 * Thomas Graf <tgraf@suug.ch>
6 * Johannes Berg <johannes@sipsolutions.net>
7 */
8
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/types.h>
14 #include <linux/socket.h>
15 #include <linux/string.h>
16 #include <linux/skbuff.h>
17 #include <linux/mutex.h>
18 #include <linux/bitmap.h>
19 #include <linux/rwsem.h>
20 #include <net/sock.h>
21 #include <net/genetlink.h>
22
23 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
24 static DECLARE_RWSEM(cb_lock);
25
26 void genl_lock(void)
27 {
28 mutex_lock(&genl_mutex);
29 }
30 EXPORT_SYMBOL(genl_lock);
31
32 void genl_unlock(void)
33 {
34 mutex_unlock(&genl_mutex);
35 }
36 EXPORT_SYMBOL(genl_unlock);
37
38 #ifdef CONFIG_LOCKDEP
39 int lockdep_genl_is_held(void)
40 {
41 return lockdep_is_held(&genl_mutex);
42 }
43 EXPORT_SYMBOL(lockdep_genl_is_held);
44 #endif
45
46 static void genl_lock_all(void)
47 {
48 down_write(&cb_lock);
49 genl_lock();
50 }
51
52 static void genl_unlock_all(void)
53 {
54 genl_unlock();
55 up_write(&cb_lock);
56 }
57
58 #define GENL_FAM_TAB_SIZE 16
59 #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
60
61 static struct list_head family_ht[GENL_FAM_TAB_SIZE];
62 /*
63 * Bitmap of multicast groups that are currently in use.
64 *
65 * To avoid an allocation at boot of just one unsigned long,
66 * declare it global instead.
67 * Bit 0 is marked as already used since group 0 is invalid.
68 * Bit 1 is marked as already used since the drop-monitor code
69 * abuses the API and thinks it can statically use group 1.
70 * That group will typically conflict with other groups that
71 * any proper users use.
72 * Bit 16 is marked as used since it's used for generic netlink
73 * and the code no longer marks pre-reserved IDs as used.
74 * Bit 17 is marked as already used since the VFS quota code
75 * also abused this API and relied on family == group ID; we
76 * cater to that by giving it a static family and group ID.
77 */
78 static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
79 BIT(GENL_ID_VFS_DQUOT);
80 static unsigned long *mc_groups = &mc_group_start;
81 static unsigned long mc_groups_longs = 1;
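/*
 * For reference, with the values from include/uapi/linux/genetlink.h
 * (GENL_ID_CTRL == 0x10, GENL_ID_VFS_DQUOT == 0x11) the initial bitmap
 * word above works out to:
 *
 *	0x3 | BIT(0x10) | BIT(0x11) == 0x00030003
 *
 * i.e. group ids 0, 1, 16 and 17 start out reserved, and everything else
 * is handed out by genl_allocate_reserve_groups() below.
 */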
82
83 static int genl_ctrl_event(int event, struct genl_family *family,
84 const struct genl_multicast_group *grp,
85 int grp_id);
86
87 static inline unsigned int genl_family_hash(unsigned int id)
88 {
89 return id & GENL_FAM_TAB_MASK;
90 }
91
92 static inline struct list_head *genl_family_chain(unsigned int id)
93 {
94 return &family_ht[genl_family_hash(id)];
95 }
96
97 static struct genl_family *genl_family_find_byid(unsigned int id)
98 {
99 struct genl_family *f;
100
101 list_for_each_entry(f, genl_family_chain(id), family_list)
102 if (f->id == id)
103 return f;
104
105 return NULL;
106 }
107
108 static struct genl_family *genl_family_find_byname(char *name)
109 {
110 struct genl_family *f;
111 int i;
112
113 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
114 list_for_each_entry(f, genl_family_chain(i), family_list)
115 if (strcmp(f->name, name) == 0)
116 return f;
117
118 return NULL;
119 }
120
121 static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
122 {
123 int i;
124
125 for (i = 0; i < family->n_ops; i++)
126 if (family->ops[i].cmd == cmd)
127 return &family->ops[i];
128
129 return NULL;
130 }
131
132 /* Of course we are going to have problems once we hit
133 * 2^16 alive types, but that can only happen by year 2K
134 */
135 static u16 genl_generate_id(void)
136 {
137 static u16 id_gen_idx = GENL_MIN_ID;
138 int i;
139
140 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
141 if (id_gen_idx != GENL_ID_VFS_DQUOT &&
142 !genl_family_find_byid(id_gen_idx))
143 return id_gen_idx;
144 if (++id_gen_idx > GENL_MAX_ID)
145 id_gen_idx = GENL_MIN_ID;
146 }
147
148 return 0;
149 }
150
151 static int genl_allocate_reserve_groups(int n_groups, int *first_id)
152 {
153 unsigned long *new_groups;
154 int start = 0;
155 int i;
156 int id;
157 bool fits;
158
159 do {
160 if (start == 0)
161 id = find_first_zero_bit(mc_groups,
162 mc_groups_longs *
163 BITS_PER_LONG);
164 else
165 id = find_next_zero_bit(mc_groups,
166 mc_groups_longs * BITS_PER_LONG,
167 start);
168
169 fits = true;
170 for (i = id;
171 i < min_t(int, id + n_groups,
172 mc_groups_longs * BITS_PER_LONG);
173 i++) {
174 if (test_bit(i, mc_groups)) {
175 start = i;
176 fits = false;
177 break;
178 }
179 }
180
181 if (id >= mc_groups_longs * BITS_PER_LONG) {
182 unsigned long new_longs = mc_groups_longs +
183 BITS_TO_LONGS(n_groups);
184 size_t nlen = new_longs * sizeof(unsigned long);
185
186 if (mc_groups == &mc_group_start) {
187 new_groups = kzalloc(nlen, GFP_KERNEL);
188 if (!new_groups)
189 return -ENOMEM;
190 mc_groups = new_groups;
191 *mc_groups = mc_group_start;
192 } else {
193 new_groups = krealloc(mc_groups, nlen,
194 GFP_KERNEL);
195 if (!new_groups)
196 return -ENOMEM;
197 mc_groups = new_groups;
198 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
199 mc_groups[mc_groups_longs + i] = 0;
200 }
201 mc_groups_longs = new_longs;
202 }
203 } while (!fits);
204
205 for (i = id; i < id + n_groups; i++)
206 set_bit(i, mc_groups);
207 *first_id = id;
208 return 0;
209 }
210
211 static struct genl_family genl_ctrl;
212
213 static int genl_validate_assign_mc_groups(struct genl_family *family)
214 {
215 int first_id;
216 int n_groups = family->n_mcgrps;
217 int err, i;
218 bool groups_allocated = false;
219
220 if (!n_groups)
221 return 0;
222
223 for (i = 0; i < n_groups; i++) {
224 const struct genl_multicast_group *grp = &family->mcgrps[i];
225
226 if (WARN_ON(grp->name[0] == '\0'))
227 return -EINVAL;
228 if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
229 return -EINVAL;
230 }
231
232 /* special-case our own group and hacks */
233 if (family == &genl_ctrl) {
234 first_id = GENL_ID_CTRL;
235 BUG_ON(n_groups != 1);
236 } else if (strcmp(family->name, "NET_DM") == 0) {
237 first_id = 1;
238 BUG_ON(n_groups != 1);
239 } else if (strcmp(family->name, "VFS_DQUOT") == 0) {
240 first_id = GENL_ID_VFS_DQUOT;
241 BUG_ON(n_groups != 1);
242 } else {
243 groups_allocated = true;
244 err = genl_allocate_reserve_groups(n_groups, &first_id);
245 if (err)
246 return err;
247 }
248
249 family->mcgrp_offset = first_id;
250
251 /* if still initializing, can't and don't need to realloc bitmaps */
252 if (!init_net.genl_sock)
253 return 0;
254
255 if (family->netnsok) {
256 struct net *net;
257
258 netlink_table_grab();
259 rcu_read_lock();
260 for_each_net_rcu(net) {
261 err = __netlink_change_ngroups(net->genl_sock,
262 mc_groups_longs * BITS_PER_LONG);
263 if (err) {
264 /*
265 * No need to roll back, can only fail if
266 * memory allocation fails and then the
267 * number of _possible_ groups has been
268 * increased on some sockets which is ok.
269 */
270 break;
271 }
272 }
273 rcu_read_unlock();
274 netlink_table_ungrab();
275 } else {
276 err = netlink_change_ngroups(init_net.genl_sock,
277 mc_groups_longs * BITS_PER_LONG);
278 }
279
280 if (groups_allocated && err) {
281 for (i = 0; i < family->n_mcgrps; i++)
282 clear_bit(family->mcgrp_offset + i, mc_groups);
283 }
284
285 return err;
286 }
287
288 static void genl_unregister_mc_groups(struct genl_family *family)
289 {
290 struct net *net;
291 int i;
292
293 netlink_table_grab();
294 rcu_read_lock();
295 for_each_net_rcu(net) {
296 for (i = 0; i < family->n_mcgrps; i++)
297 __netlink_clear_multicast_users(
298 net->genl_sock, family->mcgrp_offset + i);
299 }
300 rcu_read_unlock();
301 netlink_table_ungrab();
302
303 for (i = 0; i < family->n_mcgrps; i++) {
304 int grp_id = family->mcgrp_offset + i;
305
306 if (grp_id != 1)
307 clear_bit(grp_id, mc_groups);
308 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
309 &family->mcgrps[i], grp_id);
310 }
311 }
312
313 static int genl_validate_ops(struct genl_family *family)
314 {
315 const struct genl_ops *ops = family->ops;
316 unsigned int n_ops = family->n_ops;
317 int i, j;
318
319 if (WARN_ON(n_ops && !ops))
320 return -EINVAL;
321
322 if (!n_ops)
323 return 0;
324
325 for (i = 0; i < n_ops; i++) {
326 if (ops[i].dumpit == NULL && ops[i].doit == NULL)
327 return -EINVAL;
328 for (j = i + 1; j < n_ops; j++)
329 if (ops[i].cmd == ops[j].cmd)
330 return -EINVAL;
331 }
332
333 /* family is not registered yet, so no locking needed */
334 family->ops = ops;
335 family->n_ops = n_ops;
336
337 return 0;
338 }
339
340 /**
341 * __genl_register_family - register a generic netlink family
342 * @family: generic netlink family
343 *
344 * Registers the specified family after validating it first. Only one
345 * family may be registered with the same family name or identifier.
346 * The family id may equal GENL_ID_GENERATE, causing a unique id to
347 * be automatically generated and assigned.
348 *
349 * The family's ops array must already be assigned; you can use the
350 * genl_register_family_with_ops() helper function.
351 *
352 * Return 0 on success or a negative error code.
353 */
354 int __genl_register_family(struct genl_family *family)
355 {
356 int err = -EINVAL, i;
357
358 if (family->id && family->id < GENL_MIN_ID)
359 goto errout;
360
361 if (family->id > GENL_MAX_ID)
362 goto errout;
363
364 err = genl_validate_ops(family);
365 if (err)
366 return err;
367
368 genl_lock_all();
369
370 if (genl_family_find_byname(family->name)) {
371 err = -EEXIST;
372 goto errout_locked;
373 }
374
375 if (family->id == GENL_ID_GENERATE) {
376 u16 newid = genl_generate_id();
377
378 if (!newid) {
379 err = -ENOMEM;
380 goto errout_locked;
381 }
382
383 family->id = newid;
384 } else if (genl_family_find_byid(family->id)) {
385 err = -EEXIST;
386 goto errout_locked;
387 }
388
389 if (family->maxattr && !family->parallel_ops) {
390 family->attrbuf = kmalloc((family->maxattr+1) *
391 sizeof(struct nlattr *), GFP_KERNEL);
392 if (family->attrbuf == NULL) {
393 err = -ENOMEM;
394 goto errout_locked;
395 }
396 } else
397 family->attrbuf = NULL;
398
399 err = genl_validate_assign_mc_groups(family);
400 if (err)
401 goto errout_locked;
402
403 list_add_tail(&family->family_list, genl_family_chain(family->id));
404 genl_unlock_all();
405
406 /* send all events */
407 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
408 for (i = 0; i < family->n_mcgrps; i++)
409 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
410 &family->mcgrps[i], family->mcgrp_offset + i);
411
412 return 0;
413
414 errout_locked:
415 genl_unlock_all();
416 errout:
417 return err;
418 }
419 EXPORT_SYMBOL(__genl_register_family);
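/*
 * Illustrative sketch (not part of this file, hypothetical FOO_* names):
 * a typical caller defines its ops and family statically and registers
 * them through the genl_register_family_with_ops() wrapper, which fills
 * in ->ops/->n_ops before calling __genl_register_family():
 *
 *	static const struct genl_ops foo_genl_ops[] = {
 *		{
 *			.cmd	= FOO_CMD_GET,
 *			.doit	= foo_cmd_get,
 *			.policy	= foo_genl_policy,
 *		},
 *	};
 *
 *	static struct genl_family foo_genl_family = {
 *		.id		= GENL_ID_GENERATE,
 *		.hdrsize	= 0,
 *		.name		= "FOO_EXAMPLE",
 *		.version	= 1,
 *		.maxattr	= FOO_ATTR_MAX,
 *	};
 *
 *	err = genl_register_family_with_ops(&foo_genl_family, foo_genl_ops);
 */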
420
421 /**
422 * genl_unregister_family - unregister generic netlink family
423 * @family: generic netlink family
424 *
425 * Unregisters the specified family.
426 *
427 * Returns 0 on success or a negative error code.
428 */
429 int genl_unregister_family(struct genl_family *family)
430 {
431 struct genl_family *rc;
432
433 genl_lock_all();
434
435 genl_unregister_mc_groups(family);
436
437 list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
438 if (family->id != rc->id || strcmp(rc->name, family->name))
439 continue;
440
441 list_del(&rc->family_list);
442 family->n_ops = 0;
443 genl_unlock_all();
444
445 kfree(family->attrbuf);
446 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
447 return 0;
448 }
449
450 genl_unlock_all();
451
452 return -ENOENT;
453 }
454 EXPORT_SYMBOL(genl_unregister_family);
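/*
 * Illustrative sketch (hypothetical foo_* names): unregistration is
 * normally the module-exit counterpart of the registration above:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		genl_unregister_family(&foo_genl_family);
 *	}
 *	module_exit(foo_exit);
 */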
455
456 /**
457 * genlmsg_new_unicast - Allocate generic netlink message for unicast
458 * @payload: size of the message payload
459 * @info: information on destination
460 * @flags: the type of memory to allocate
461 *
462 * Allocates a new sk_buff large enough to cover the specified payload
463 * plus required Netlink headers. Will check the receiving socket for
464 * memory-mapped I/O capability and use it if enabled. Will fall back
465 * to a non-mapped skb if the message size exceeds the ring's frame size.
466 */
467 struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
468 gfp_t flags)
469 {
470 size_t len = nlmsg_total_size(genlmsg_total_size(payload));
471
472 return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
473 }
474 EXPORT_SYMBOL_GPL(genlmsg_new_unicast);
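/*
 * Illustrative sketch (hypothetical FOO_* names): a doit() handler would
 * typically size the allocation from the attributes it intends to put,
 * e.g. for a single u32 attribute:
 *
 *	msg = genlmsg_new_unicast(nla_total_size(sizeof(u32)), info,
 *				  GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 * If the destination socket has an mmap'ed ring but the requested size
 * does not fit into one ring frame, netlink_alloc_skb() falls back to an
 * ordinary skb allocation, as described above.
 */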
475
476 /**
477 * genlmsg_put - Add generic netlink header to netlink message
478 * @skb: socket buffer holding the message
479 * @portid: netlink portid the message is addressed to
480 * @seq: sequence number (usually the one of the sender)
481 * @family: generic netlink family
482 * @flags: netlink message flags
483 * @cmd: generic netlink command
484 *
485 * Returns a pointer to the user-specific header
486 */
487 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
488 struct genl_family *family, int flags, u8 cmd)
489 {
490 struct nlmsghdr *nlh;
491 struct genlmsghdr *hdr;
492
493 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
494 family->hdrsize, flags);
495 if (nlh == NULL)
496 return NULL;
497
498 hdr = nlmsg_data(nlh);
499 hdr->cmd = cmd;
500 hdr->version = family->version;
501 hdr->reserved = 0;
502
503 return (char *) hdr + GENL_HDRLEN;
504 }
505 EXPORT_SYMBOL(genlmsg_put);
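/*
 * Illustrative sketch (hypothetical FOO_* names, assuming msg was
 * allocated e.g. with genlmsg_new_unicast() above): the usual pattern
 * for building and sending a reply from a doit() handler is:
 *
 *	void *hdr;
 *
 *	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *			  &foo_genl_family, 0, FOO_CMD_GET);
 *	if (!hdr)
 *		goto err_free;
 *	if (nla_put_u32(msg, FOO_ATTR_VALUE, val))
 *		goto err_cancel;
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_reply(msg, info);
 *
 *	err_cancel:
 *		genlmsg_cancel(msg, hdr);
 *	err_free:
 *		nlmsg_free(msg);
 *		return -EMSGSIZE;
 */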
506
507 static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
508 {
509 /* our ops are always const - netlink API doesn't propagate that */
510 const struct genl_ops *ops = cb->data;
511 int rc;
512
513 genl_lock();
514 rc = ops->dumpit(skb, cb);
515 genl_unlock();
516 return rc;
517 }
518
519 static int genl_lock_done(struct netlink_callback *cb)
520 {
521 /* our ops are always const - netlink API doesn't propagate that */
522 const struct genl_ops *ops = cb->data;
523 int rc = 0;
524
525 if (ops->done) {
526 genl_lock();
527 rc = ops->done(cb);
528 genl_unlock();
529 }
530 return rc;
531 }
532
533 static int genl_family_rcv_msg(struct genl_family *family,
534 struct sk_buff *skb,
535 struct nlmsghdr *nlh)
536 {
537 const struct genl_ops *ops;
538 struct net *net = sock_net(skb->sk);
539 struct genl_info info;
540 struct genlmsghdr *hdr = nlmsg_data(nlh);
541 struct nlattr **attrbuf;
542 int hdrlen, err;
543
544 /* this family doesn't exist in this netns */
545 if (!family->netnsok && !net_eq(net, &init_net))
546 return -ENOENT;
547
548 hdrlen = GENL_HDRLEN + family->hdrsize;
549 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
550 return -EINVAL;
551
552 ops = genl_get_cmd(hdr->cmd, family);
553 if (ops == NULL)
554 return -EOPNOTSUPP;
555
556 if ((ops->flags & GENL_ADMIN_PERM) &&
557 !capable(CAP_NET_ADMIN))
558 return -EPERM;
559
560 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
561 int rc;
562
563 if (ops->dumpit == NULL)
564 return -EOPNOTSUPP;
565
566 if (!family->parallel_ops) {
567 struct netlink_dump_control c = {
568 .module = family->module,
569 /* we have const, but the netlink API doesn't */
570 .data = (void *)ops,
571 .dump = genl_lock_dumpit,
572 .done = genl_lock_done,
573 };
574
575 genl_unlock();
576 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
577 genl_lock();
578
579 } else {
580 struct netlink_dump_control c = {
581 .module = family->module,
582 .dump = ops->dumpit,
583 .done = ops->done,
584 };
585
586 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
587 }
588
589 return rc;
590 }
591
592 if (ops->doit == NULL)
593 return -EOPNOTSUPP;
594
595 if (family->maxattr && family->parallel_ops) {
596 attrbuf = kmalloc((family->maxattr+1) *
597 sizeof(struct nlattr *), GFP_KERNEL);
598 if (attrbuf == NULL)
599 return -ENOMEM;
600 } else
601 attrbuf = family->attrbuf;
602
603 if (attrbuf) {
604 err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
605 ops->policy);
606 if (err < 0)
607 goto out;
608 }
609
610 info.snd_seq = nlh->nlmsg_seq;
611 info.snd_portid = NETLINK_CB(skb).portid;
612 info.nlhdr = nlh;
613 info.genlhdr = nlmsg_data(nlh);
614 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
615 info.attrs = attrbuf;
616 info.dst_sk = skb->sk;
617 genl_info_net_set(&info, net);
618 memset(&info.user_ptr, 0, sizeof(info.user_ptr));
619
620 if (family->pre_doit) {
621 err = family->pre_doit(ops, skb, &info);
622 if (err)
623 goto out;
624 }
625
626 err = ops->doit(skb, &info);
627
628 if (family->post_doit)
629 family->post_doit(ops, skb, &info);
630
631 out:
632 if (family->parallel_ops)
633 kfree(attrbuf);
634
635 return err;
636 }
637
638 static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
639 {
640 struct genl_family *family;
641 int err;
642
643 family = genl_family_find_byid(nlh->nlmsg_type);
644 if (family == NULL)
645 return -ENOENT;
646
647 if (!family->parallel_ops)
648 genl_lock();
649
650 err = genl_family_rcv_msg(family, skb, nlh);
651
652 if (!family->parallel_ops)
653 genl_unlock();
654
655 return err;
656 }
657
658 static void genl_rcv(struct sk_buff *skb)
659 {
660 down_read(&cb_lock);
661 netlink_rcv_skb(skb, &genl_rcv_msg);
662 up_read(&cb_lock);
663 }
664
665 /**************************************************************************
666 * Controller
667 **************************************************************************/
668
669 static struct genl_family genl_ctrl = {
670 .id = GENL_ID_CTRL,
671 .name = "nlctrl",
672 .version = 0x2,
673 .maxattr = CTRL_ATTR_MAX,
674 .netnsok = true,
675 };
676
677 static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
678 u32 flags, struct sk_buff *skb, u8 cmd)
679 {
680 void *hdr;
681
682 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
683 if (hdr == NULL)
684 return -1;
685
686 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
687 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
688 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
689 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
690 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
691 goto nla_put_failure;
692
693 if (family->n_ops) {
694 struct nlattr *nla_ops;
695 int i;
696
697 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
698 if (nla_ops == NULL)
699 goto nla_put_failure;
700
701 for (i = 0; i < family->n_ops; i++) {
702 struct nlattr *nest;
703 const struct genl_ops *ops = &family->ops[i];
704 u32 op_flags = ops->flags;
705
706 if (ops->dumpit)
707 op_flags |= GENL_CMD_CAP_DUMP;
708 if (ops->doit)
709 op_flags |= GENL_CMD_CAP_DO;
710 if (ops->policy)
711 op_flags |= GENL_CMD_CAP_HASPOL;
712
713 nest = nla_nest_start(skb, i + 1);
714 if (nest == NULL)
715 goto nla_put_failure;
716
717 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
718 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
719 goto nla_put_failure;
720
721 nla_nest_end(skb, nest);
722 }
723
724 nla_nest_end(skb, nla_ops);
725 }
726
727 if (family->n_mcgrps) {
728 struct nlattr *nla_grps;
729 int i;
730
731 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
732 if (nla_grps == NULL)
733 goto nla_put_failure;
734
735 for (i = 0; i < family->n_mcgrps; i++) {
736 struct nlattr *nest;
737 const struct genl_multicast_group *grp;
738
739 grp = &family->mcgrps[i];
740
741 nest = nla_nest_start(skb, i + 1);
742 if (nest == NULL)
743 goto nla_put_failure;
744
745 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
746 family->mcgrp_offset + i) ||
747 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
748 grp->name))
749 goto nla_put_failure;
750
751 nla_nest_end(skb, nest);
752 }
753 nla_nest_end(skb, nla_grps);
754 }
755
756 return genlmsg_end(skb, hdr);
757
758 nla_put_failure:
759 genlmsg_cancel(skb, hdr);
760 return -EMSGSIZE;
761 }
762
763 static int ctrl_fill_mcgrp_info(struct genl_family *family,
764 const struct genl_multicast_group *grp,
765 int grp_id, u32 portid, u32 seq, u32 flags,
766 struct sk_buff *skb, u8 cmd)
767 {
768 void *hdr;
769 struct nlattr *nla_grps;
770 struct nlattr *nest;
771
772 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
773 if (hdr == NULL)
774 return -1;
775
776 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
777 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
778 goto nla_put_failure;
779
780 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
781 if (nla_grps == NULL)
782 goto nla_put_failure;
783
784 nest = nla_nest_start(skb, 1);
785 if (nest == NULL)
786 goto nla_put_failure;
787
788 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
789 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
790 grp->name))
791 goto nla_put_failure;
792
793 nla_nest_end(skb, nest);
794 nla_nest_end(skb, nla_grps);
795
796 return genlmsg_end(skb, hdr);
797
798 nla_put_failure:
799 genlmsg_cancel(skb, hdr);
800 return -EMSGSIZE;
801 }
802
803 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
804 {
805
806 int i, n = 0;
807 struct genl_family *rt;
808 struct net *net = sock_net(skb->sk);
809 int chains_to_skip = cb->args[0];
810 int fams_to_skip = cb->args[1];
811
812 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
813 n = 0;
814 list_for_each_entry(rt, genl_family_chain(i), family_list) {
815 if (!rt->netnsok && !net_eq(net, &init_net))
816 continue;
817 if (++n < fams_to_skip)
818 continue;
819 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
820 cb->nlh->nlmsg_seq, NLM_F_MULTI,
821 skb, CTRL_CMD_NEWFAMILY) < 0)
822 goto errout;
823 }
824
825 fams_to_skip = 0;
826 }
827
828 errout:
829 cb->args[0] = i;
830 cb->args[1] = n;
831
832 return skb->len;
833 }
834
835 static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
836 u32 portid, int seq, u8 cmd)
837 {
838 struct sk_buff *skb;
839 int err;
840
841 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
842 if (skb == NULL)
843 return ERR_PTR(-ENOBUFS);
844
845 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
846 if (err < 0) {
847 nlmsg_free(skb);
848 return ERR_PTR(err);
849 }
850
851 return skb;
852 }
853
854 static struct sk_buff *
855 ctrl_build_mcgrp_msg(struct genl_family *family,
856 const struct genl_multicast_group *grp,
857 int grp_id, u32 portid, int seq, u8 cmd)
858 {
859 struct sk_buff *skb;
860 int err;
861
862 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
863 if (skb == NULL)
864 return ERR_PTR(-ENOBUFS);
865
866 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
867 seq, 0, skb, cmd);
868 if (err < 0) {
869 nlmsg_free(skb);
870 return ERR_PTR(err);
871 }
872
873 return skb;
874 }
875
876 static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
877 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
878 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
879 .len = GENL_NAMSIZ - 1 },
880 };
881
882 static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
883 {
884 struct sk_buff *msg;
885 struct genl_family *res = NULL;
886 int err = -EINVAL;
887
888 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
889 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
890 res = genl_family_find_byid(id);
891 err = -ENOENT;
892 }
893
894 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
895 char *name;
896
897 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
898 res = genl_family_find_byname(name);
899 #ifdef CONFIG_MODULES
900 if (res == NULL) {
901 genl_unlock();
902 up_read(&cb_lock);
903 request_module("net-pf-%d-proto-%d-family-%s",
904 PF_NETLINK, NETLINK_GENERIC, name);
905 down_read(&cb_lock);
906 genl_lock();
907 res = genl_family_find_byname(name);
908 }
909 #endif
910 err = -ENOENT;
911 }
912
913 if (res == NULL)
914 return err;
915
916 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
917 /* family doesn't exist here */
918 return -ENOENT;
919 }
920
921 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
922 CTRL_CMD_NEWFAMILY);
923 if (IS_ERR(msg))
924 return PTR_ERR(msg);
925
926 return genlmsg_reply(msg, info);
927 }
928
929 static int genl_ctrl_event(int event, struct genl_family *family,
930 const struct genl_multicast_group *grp,
931 int grp_id)
932 {
933 struct sk_buff *msg;
934
935 /* genl is still initialising */
936 if (!init_net.genl_sock)
937 return 0;
938
939 switch (event) {
940 case CTRL_CMD_NEWFAMILY:
941 case CTRL_CMD_DELFAMILY:
942 WARN_ON(grp);
943 msg = ctrl_build_family_msg(family, 0, 0, event);
944 break;
945 case CTRL_CMD_NEWMCAST_GRP:
946 case CTRL_CMD_DELMCAST_GRP:
947 BUG_ON(!grp);
948 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
949 break;
950 default:
951 return -EINVAL;
952 }
953
954 if (IS_ERR(msg))
955 return PTR_ERR(msg);
956
957 if (!family->netnsok) {
958 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
959 0, GFP_KERNEL);
960 } else {
961 rcu_read_lock();
962 genlmsg_multicast_allns(&genl_ctrl, msg, 0,
963 0, GFP_ATOMIC);
964 rcu_read_unlock();
965 }
966
967 return 0;
968 }
969
970 static struct genl_ops genl_ctrl_ops[] = {
971 {
972 .cmd = CTRL_CMD_GETFAMILY,
973 .doit = ctrl_getfamily,
974 .dumpit = ctrl_dumpfamily,
975 .policy = ctrl_policy,
976 },
977 };
978
979 static struct genl_multicast_group genl_ctrl_groups[] = {
980 { .name = "notify", },
981 };
982
983 static int __net_init genl_pernet_init(struct net *net)
984 {
985 struct netlink_kernel_cfg cfg = {
986 .input = genl_rcv,
987 .flags = NL_CFG_F_NONROOT_RECV,
988 };
989
990 /* we'll bump the group number right afterwards */
991 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
992
993 if (!net->genl_sock && net_eq(net, &init_net))
994 panic("GENL: Cannot initialize generic netlink\n");
995
996 if (!net->genl_sock)
997 return -ENOMEM;
998
999 return 0;
1000 }
1001
1002 static void __net_exit genl_pernet_exit(struct net *net)
1003 {
1004 netlink_kernel_release(net->genl_sock);
1005 net->genl_sock = NULL;
1006 }
1007
1008 static struct pernet_operations genl_pernet_ops = {
1009 .init = genl_pernet_init,
1010 .exit = genl_pernet_exit,
1011 };
1012
1013 static int __init genl_init(void)
1014 {
1015 int i, err;
1016
1017 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
1018 INIT_LIST_HEAD(&family_ht[i]);
1019
1020 err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
1021 genl_ctrl_groups);
1022 if (err < 0)
1023 goto problem;
1024
1025 err = register_pernet_subsys(&genl_pernet_ops);
1026 if (err)
1027 goto problem;
1028
1029 return 0;
1030
1031 problem:
1032 panic("GENL: Cannot register controller: %d\n", err);
1033 }
1034
1035 subsys_initcall(genl_init);
1036
1037 static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1038 gfp_t flags)
1039 {
1040 struct sk_buff *tmp;
1041 struct net *net, *prev = NULL;
1042 int err;
1043
1044 for_each_net_rcu(net) {
1045 if (prev) {
1046 tmp = skb_clone(skb, flags);
1047 if (!tmp) {
1048 err = -ENOMEM;
1049 goto error;
1050 }
1051 err = nlmsg_multicast(prev->genl_sock, tmp,
1052 portid, group, flags);
1053 if (err)
1054 goto error;
1055 }
1056
1057 prev = net;
1058 }
1059
1060 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1061 error:
1062 kfree_skb(skb);
1063 return err;
1064 }
1065
1066 int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
1067 u32 portid, unsigned int group, gfp_t flags)
1068 {
1069 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1070 return -EINVAL;
1071 group = family->mcgrp_offset + group;
1072 return genlmsg_mcast(skb, portid, group, flags);
1073 }
1074 EXPORT_SYMBOL(genlmsg_multicast_allns);
1075
1076 void genl_notify(struct genl_family *family,
1077 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
1078 struct nlmsghdr *nlh, gfp_t flags)
1079 {
1080 struct sock *sk = net->genl_sock;
1081 int report = 0;
1082
1083 if (nlh)
1084 report = nlmsg_report(nlh);
1085
1086 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1087 return;
1088 group = family->mcgrp_offset + group;
1089 nlmsg_notify(sk, skb, portid, group, report, flags);
1090 }
1091 EXPORT_SYMBOL(genl_notify);
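/*
 * Illustrative sketch (hypothetical foo_* names): note that the group
 * argument to both helpers above is an index into family->mcgrps[], not
 * a global group id; the offset assigned at registration time is added
 * internally. Sending an event to the family's first group therefore
 * looks like:
 *
 *	genlmsg_multicast(&foo_genl_family, msg, 0, 0, GFP_KERNEL);
 */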