/*
 * NETLINK	Generic Netlink Family
 *
 * Authors:	Jamal Hadi Salim
 *		Thomas Graf <tgraf@suug.ch>
 *		Johannes Berg <johannes@sipsolutions.net>
 */
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/types.h>
14 #include <linux/socket.h>
15 #include <linux/string.h>
16 #include <linux/skbuff.h>
17 #include <linux/mutex.h>
18 #include <linux/bitmap.h>
19 #include <linux/rwsem.h>
21 #include <net/genetlink.h>
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);   /* taken for read around message dispatch */
28 mutex_lock(&genl_mutex
);
30 EXPORT_SYMBOL(genl_lock
);
32 void genl_unlock(void)
34 mutex_unlock(&genl_mutex
);
36 EXPORT_SYMBOL(genl_unlock
);
/*
 * lockdep assertion helper: nonzero when genl_mutex is held.
 * NOTE(review): lockdep_is_held() only exists with lockdep enabled;
 * upstream wraps this in #ifdef CONFIG_LOCKDEP — restored here, verify
 * against the tree's config guards.
 */
#ifdef CONFIG_LOCKDEP
int lockdep_genl_is_held(void)
{
	return lockdep_is_held(&genl_mutex);
}
EXPORT_SYMBOL(lockdep_genl_is_held);
#endif
46 static void genl_lock_all(void)
52 static void genl_unlock_all(void)
58 #define GENL_FAM_TAB_SIZE 16
59 #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
61 static struct list_head family_ht
[GENL_FAM_TAB_SIZE
];
63 * Bitmap of multicast groups that are currently in use.
65 * To avoid an allocation at boot of just one unsigned long,
66 * declare it global instead.
67 * Bit 0 is marked as already used since group 0 is invalid.
68 * Bit 1 is marked as already used since the drop-monitor code
69 * abuses the API and thinks it can statically use group 1.
70 * That group will typically conflict with other groups that
71 * any proper users use.
72 * Bit 16 is marked as used since it's used for generic netlink
73 * and the code no longer marks pre-reserved IDs as used.
74 * Bit 17 is marked as already used since the VFS quota code
75 * also abused this API and relied on family == group ID, we
76 * cater to that by giving it a static family and group ID.
78 static unsigned long mc_group_start
= 0x3 | BIT(GENL_ID_CTRL
) |
79 BIT(GENL_ID_VFS_DQUOT
);
80 static unsigned long *mc_groups
= &mc_group_start
;
81 static unsigned long mc_groups_longs
= 1;
/* Forward declaration; defined with the controller family below. */
static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);
87 static inline unsigned int genl_family_hash(unsigned int id
)
89 return id
& GENL_FAM_TAB_MASK
;
92 static inline struct list_head
*genl_family_chain(unsigned int id
)
94 return &family_ht
[genl_family_hash(id
)];
97 static struct genl_family
*genl_family_find_byid(unsigned int id
)
99 struct genl_family
*f
;
101 list_for_each_entry(f
, genl_family_chain(id
), family_list
)
108 static struct genl_family
*genl_family_find_byname(char *name
)
110 struct genl_family
*f
;
113 for (i
= 0; i
< GENL_FAM_TAB_SIZE
; i
++)
114 list_for_each_entry(f
, genl_family_chain(i
), family_list
)
115 if (strcmp(f
->name
, name
) == 0)
121 static const struct genl_ops
*genl_get_cmd(u8 cmd
, struct genl_family
*family
)
125 for (i
= 0; i
< family
->n_ops
; i
++)
126 if (family
->ops
[i
].cmd
== cmd
)
127 return &family
->ops
[i
];
132 /* Of course we are going to have problems once we hit
133 * 2^16 alive types, but that can only happen by year 2K
135 static u16
genl_generate_id(void)
137 static u16 id_gen_idx
= GENL_MIN_ID
;
140 for (i
= 0; i
<= GENL_MAX_ID
- GENL_MIN_ID
; i
++) {
141 if (id_gen_idx
!= GENL_ID_VFS_DQUOT
&&
142 !genl_family_find_byid(id_gen_idx
))
144 if (++id_gen_idx
> GENL_MAX_ID
)
145 id_gen_idx
= GENL_MIN_ID
;
151 static int genl_allocate_reserve_groups(int n_groups
, int *first_id
)
153 unsigned long *new_groups
;
161 id
= find_first_zero_bit(mc_groups
,
165 id
= find_next_zero_bit(mc_groups
,
166 mc_groups_longs
* BITS_PER_LONG
,
171 i
< min_t(int, id
+ n_groups
,
172 mc_groups_longs
* BITS_PER_LONG
);
174 if (test_bit(i
, mc_groups
)) {
181 if (id
>= mc_groups_longs
* BITS_PER_LONG
) {
182 unsigned long new_longs
= mc_groups_longs
+
183 BITS_TO_LONGS(n_groups
);
184 size_t nlen
= new_longs
* sizeof(unsigned long);
186 if (mc_groups
== &mc_group_start
) {
187 new_groups
= kzalloc(nlen
, GFP_KERNEL
);
190 mc_groups
= new_groups
;
191 *mc_groups
= mc_group_start
;
193 new_groups
= krealloc(mc_groups
, nlen
,
197 mc_groups
= new_groups
;
198 for (i
= 0; i
< BITS_TO_LONGS(n_groups
); i
++)
199 mc_groups
[mc_groups_longs
+ i
] = 0;
201 mc_groups_longs
= new_longs
;
205 for (i
= id
; i
< id
+ n_groups
; i
++)
206 set_bit(i
, mc_groups
);
211 static struct genl_family genl_ctrl
;
213 static int genl_validate_assign_mc_groups(struct genl_family
*family
)
216 int n_groups
= family
->n_mcgrps
;
218 bool groups_allocated
= false;
223 for (i
= 0; i
< n_groups
; i
++) {
224 const struct genl_multicast_group
*grp
= &family
->mcgrps
[i
];
226 if (WARN_ON(grp
->name
[0] == '\0'))
228 if (WARN_ON(memchr(grp
->name
, '\0', GENL_NAMSIZ
) == NULL
))
232 /* special-case our own group and hacks */
233 if (family
== &genl_ctrl
) {
234 first_id
= GENL_ID_CTRL
;
235 BUG_ON(n_groups
!= 1);
236 } else if (strcmp(family
->name
, "NET_DM") == 0) {
238 BUG_ON(n_groups
!= 1);
239 } else if (strcmp(family
->name
, "VFS_DQUOT") == 0) {
240 first_id
= GENL_ID_VFS_DQUOT
;
241 BUG_ON(n_groups
!= 1);
243 groups_allocated
= true;
244 err
= genl_allocate_reserve_groups(n_groups
, &first_id
);
249 family
->mcgrp_offset
= first_id
;
251 /* if still initializing, can't and don't need to to realloc bitmaps */
252 if (!init_net
.genl_sock
)
255 if (family
->netnsok
) {
258 netlink_table_grab();
260 for_each_net_rcu(net
) {
261 err
= __netlink_change_ngroups(net
->genl_sock
,
262 mc_groups_longs
* BITS_PER_LONG
);
265 * No need to roll back, can only fail if
266 * memory allocation fails and then the
267 * number of _possible_ groups has been
268 * increased on some sockets which is ok.
274 netlink_table_ungrab();
276 err
= netlink_change_ngroups(init_net
.genl_sock
,
277 mc_groups_longs
* BITS_PER_LONG
);
280 if (groups_allocated
&& err
) {
281 for (i
= 0; i
< family
->n_mcgrps
; i
++)
282 clear_bit(family
->mcgrp_offset
+ i
, mc_groups
);
288 static void genl_unregister_mc_groups(struct genl_family
*family
)
293 netlink_table_grab();
295 for_each_net_rcu(net
) {
296 for (i
= 0; i
< family
->n_mcgrps
; i
++)
297 __netlink_clear_multicast_users(
298 net
->genl_sock
, family
->mcgrp_offset
+ i
);
301 netlink_table_ungrab();
303 for (i
= 0; i
< family
->n_mcgrps
; i
++) {
304 int grp_id
= family
->mcgrp_offset
+ i
;
307 clear_bit(grp_id
, mc_groups
);
308 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP
, family
,
309 &family
->mcgrps
[i
], grp_id
);
313 static int genl_validate_ops(struct genl_family
*family
)
315 const struct genl_ops
*ops
= family
->ops
;
316 unsigned int n_ops
= family
->n_ops
;
319 if (WARN_ON(n_ops
&& !ops
))
325 for (i
= 0; i
< n_ops
; i
++) {
326 if (ops
[i
].dumpit
== NULL
&& ops
[i
].doit
== NULL
)
328 for (j
= i
+ 1; j
< n_ops
; j
++)
329 if (ops
[i
].cmd
== ops
[j
].cmd
)
333 /* family is not registered yet, so no locking needed */
335 family
->n_ops
= n_ops
;
341 * __genl_register_family - register a generic netlink family
342 * @family: generic netlink family
344 * Registers the specified family after validating it first. Only one
345 * family may be registered with the same family name or identifier.
346 * The family id may equal GENL_ID_GENERATE causing an unique id to
347 * be automatically generated and assigned.
349 * The family's ops array must already be assigned, you can use the
350 * genl_register_family_with_ops() helper function.
352 * Return 0 on success or a negative error code.
354 int __genl_register_family(struct genl_family
*family
)
356 int err
= -EINVAL
, i
;
358 if (family
->id
&& family
->id
< GENL_MIN_ID
)
361 if (family
->id
> GENL_MAX_ID
)
364 err
= genl_validate_ops(family
);
370 if (genl_family_find_byname(family
->name
)) {
375 if (family
->id
== GENL_ID_GENERATE
) {
376 u16 newid
= genl_generate_id();
384 } else if (genl_family_find_byid(family
->id
)) {
389 if (family
->maxattr
&& !family
->parallel_ops
) {
390 family
->attrbuf
= kmalloc((family
->maxattr
+1) *
391 sizeof(struct nlattr
*), GFP_KERNEL
);
392 if (family
->attrbuf
== NULL
) {
397 family
->attrbuf
= NULL
;
399 err
= genl_validate_assign_mc_groups(family
);
403 list_add_tail(&family
->family_list
, genl_family_chain(family
->id
));
406 /* send all events */
407 genl_ctrl_event(CTRL_CMD_NEWFAMILY
, family
, NULL
, 0);
408 for (i
= 0; i
< family
->n_mcgrps
; i
++)
409 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP
, family
,
410 &family
->mcgrps
[i
], family
->mcgrp_offset
+ i
);
419 EXPORT_SYMBOL(__genl_register_family
);
422 * genl_unregister_family - unregister generic netlink family
423 * @family: generic netlink family
425 * Unregisters the specified family.
427 * Returns 0 on success or a negative error code.
429 int genl_unregister_family(struct genl_family
*family
)
431 struct genl_family
*rc
;
435 genl_unregister_mc_groups(family
);
437 list_for_each_entry(rc
, genl_family_chain(family
->id
), family_list
) {
438 if (family
->id
!= rc
->id
|| strcmp(rc
->name
, family
->name
))
441 list_del(&rc
->family_list
);
445 kfree(family
->attrbuf
);
446 genl_ctrl_event(CTRL_CMD_DELFAMILY
, family
, NULL
, 0);
454 EXPORT_SYMBOL(genl_unregister_family
);
457 * genlmsg_new_unicast - Allocate generic netlink message for unicast
458 * @payload: size of the message payload
459 * @info: information on destination
460 * @flags: the type of memory to allocate
462 * Allocates a new sk_buff large enough to cover the specified payload
463 * plus required Netlink headers. Will check receiving socket for
464 * memory mapped i/o capability and use it if enabled. Will fall back
465 * to non-mapped skb if message size exceeds the frame size of the ring.
467 struct sk_buff
*genlmsg_new_unicast(size_t payload
, struct genl_info
*info
,
470 size_t len
= nlmsg_total_size(genlmsg_total_size(payload
));
472 return netlink_alloc_skb(info
->dst_sk
, len
, info
->snd_portid
, flags
);
474 EXPORT_SYMBOL_GPL(genlmsg_new_unicast
);
477 * genlmsg_put - Add generic netlink header to netlink message
478 * @skb: socket buffer holding the message
479 * @portid: netlink portid the message is addressed to
480 * @seq: sequence number (usually the one of the sender)
481 * @family: generic netlink family
482 * @flags: netlink message flags
483 * @cmd: generic netlink command
485 * Returns pointer to user specific header
487 void *genlmsg_put(struct sk_buff
*skb
, u32 portid
, u32 seq
,
488 struct genl_family
*family
, int flags
, u8 cmd
)
490 struct nlmsghdr
*nlh
;
491 struct genlmsghdr
*hdr
;
493 nlh
= nlmsg_put(skb
, portid
, seq
, family
->id
, GENL_HDRLEN
+
494 family
->hdrsize
, flags
);
498 hdr
= nlmsg_data(nlh
);
500 hdr
->version
= family
->version
;
503 return (char *) hdr
+ GENL_HDRLEN
;
505 EXPORT_SYMBOL(genlmsg_put
);
507 static int genl_lock_dumpit(struct sk_buff
*skb
, struct netlink_callback
*cb
)
509 /* our ops are always const - netlink API doesn't propagate that */
510 const struct genl_ops
*ops
= cb
->data
;
514 rc
= ops
->dumpit(skb
, cb
);
519 static int genl_lock_done(struct netlink_callback
*cb
)
521 /* our ops are always const - netlink API doesn't propagate that */
522 const struct genl_ops
*ops
= cb
->data
;
533 static int genl_family_rcv_msg(struct genl_family
*family
,
535 struct nlmsghdr
*nlh
)
537 const struct genl_ops
*ops
;
538 struct net
*net
= sock_net(skb
->sk
);
539 struct genl_info info
;
540 struct genlmsghdr
*hdr
= nlmsg_data(nlh
);
541 struct nlattr
**attrbuf
;
544 /* this family doesn't exist in this netns */
545 if (!family
->netnsok
&& !net_eq(net
, &init_net
))
548 hdrlen
= GENL_HDRLEN
+ family
->hdrsize
;
549 if (nlh
->nlmsg_len
< nlmsg_msg_size(hdrlen
))
552 ops
= genl_get_cmd(hdr
->cmd
, family
);
556 if ((ops
->flags
& GENL_ADMIN_PERM
) &&
557 !capable(CAP_NET_ADMIN
))
560 if ((nlh
->nlmsg_flags
& NLM_F_DUMP
) == NLM_F_DUMP
) {
563 if (ops
->dumpit
== NULL
)
566 if (!family
->parallel_ops
) {
567 struct netlink_dump_control c
= {
568 .module
= family
->module
,
569 /* we have const, but the netlink API doesn't */
571 .dump
= genl_lock_dumpit
,
572 .done
= genl_lock_done
,
576 rc
= __netlink_dump_start(net
->genl_sock
, skb
, nlh
, &c
);
580 struct netlink_dump_control c
= {
581 .module
= family
->module
,
586 rc
= __netlink_dump_start(net
->genl_sock
, skb
, nlh
, &c
);
592 if (ops
->doit
== NULL
)
595 if (family
->maxattr
&& family
->parallel_ops
) {
596 attrbuf
= kmalloc((family
->maxattr
+1) *
597 sizeof(struct nlattr
*), GFP_KERNEL
);
601 attrbuf
= family
->attrbuf
;
604 err
= nlmsg_parse(nlh
, hdrlen
, attrbuf
, family
->maxattr
,
610 info
.snd_seq
= nlh
->nlmsg_seq
;
611 info
.snd_portid
= NETLINK_CB(skb
).portid
;
613 info
.genlhdr
= nlmsg_data(nlh
);
614 info
.userhdr
= nlmsg_data(nlh
) + GENL_HDRLEN
;
615 info
.attrs
= attrbuf
;
616 info
.dst_sk
= skb
->sk
;
617 genl_info_net_set(&info
, net
);
618 memset(&info
.user_ptr
, 0, sizeof(info
.user_ptr
));
620 if (family
->pre_doit
) {
621 err
= family
->pre_doit(ops
, skb
, &info
);
626 err
= ops
->doit(skb
, &info
);
628 if (family
->post_doit
)
629 family
->post_doit(ops
, skb
, &info
);
632 if (family
->parallel_ops
)
638 static int genl_rcv_msg(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
640 struct genl_family
*family
;
643 family
= genl_family_find_byid(nlh
->nlmsg_type
);
647 if (!family
->parallel_ops
)
650 err
= genl_family_rcv_msg(family
, skb
, nlh
);
652 if (!family
->parallel_ops
)
658 static void genl_rcv(struct sk_buff
*skb
)
661 netlink_rcv_skb(skb
, &genl_rcv_msg
);
/**************************************************************************
 * Controller
 **************************************************************************/
669 static struct genl_family genl_ctrl
= {
673 .maxattr
= CTRL_ATTR_MAX
,
677 static int ctrl_fill_info(struct genl_family
*family
, u32 portid
, u32 seq
,
678 u32 flags
, struct sk_buff
*skb
, u8 cmd
)
682 hdr
= genlmsg_put(skb
, portid
, seq
, &genl_ctrl
, flags
, cmd
);
686 if (nla_put_string(skb
, CTRL_ATTR_FAMILY_NAME
, family
->name
) ||
687 nla_put_u16(skb
, CTRL_ATTR_FAMILY_ID
, family
->id
) ||
688 nla_put_u32(skb
, CTRL_ATTR_VERSION
, family
->version
) ||
689 nla_put_u32(skb
, CTRL_ATTR_HDRSIZE
, family
->hdrsize
) ||
690 nla_put_u32(skb
, CTRL_ATTR_MAXATTR
, family
->maxattr
))
691 goto nla_put_failure
;
694 struct nlattr
*nla_ops
;
697 nla_ops
= nla_nest_start(skb
, CTRL_ATTR_OPS
);
699 goto nla_put_failure
;
701 for (i
= 0; i
< family
->n_ops
; i
++) {
703 const struct genl_ops
*ops
= &family
->ops
[i
];
704 u32 op_flags
= ops
->flags
;
707 op_flags
|= GENL_CMD_CAP_DUMP
;
709 op_flags
|= GENL_CMD_CAP_DO
;
711 op_flags
|= GENL_CMD_CAP_HASPOL
;
713 nest
= nla_nest_start(skb
, i
+ 1);
715 goto nla_put_failure
;
717 if (nla_put_u32(skb
, CTRL_ATTR_OP_ID
, ops
->cmd
) ||
718 nla_put_u32(skb
, CTRL_ATTR_OP_FLAGS
, op_flags
))
719 goto nla_put_failure
;
721 nla_nest_end(skb
, nest
);
724 nla_nest_end(skb
, nla_ops
);
727 if (family
->n_mcgrps
) {
728 struct nlattr
*nla_grps
;
731 nla_grps
= nla_nest_start(skb
, CTRL_ATTR_MCAST_GROUPS
);
732 if (nla_grps
== NULL
)
733 goto nla_put_failure
;
735 for (i
= 0; i
< family
->n_mcgrps
; i
++) {
737 const struct genl_multicast_group
*grp
;
739 grp
= &family
->mcgrps
[i
];
741 nest
= nla_nest_start(skb
, i
+ 1);
743 goto nla_put_failure
;
745 if (nla_put_u32(skb
, CTRL_ATTR_MCAST_GRP_ID
,
746 family
->mcgrp_offset
+ i
) ||
747 nla_put_string(skb
, CTRL_ATTR_MCAST_GRP_NAME
,
749 goto nla_put_failure
;
751 nla_nest_end(skb
, nest
);
753 nla_nest_end(skb
, nla_grps
);
756 return genlmsg_end(skb
, hdr
);
759 genlmsg_cancel(skb
, hdr
);
763 static int ctrl_fill_mcgrp_info(struct genl_family
*family
,
764 const struct genl_multicast_group
*grp
,
765 int grp_id
, u32 portid
, u32 seq
, u32 flags
,
766 struct sk_buff
*skb
, u8 cmd
)
769 struct nlattr
*nla_grps
;
772 hdr
= genlmsg_put(skb
, portid
, seq
, &genl_ctrl
, flags
, cmd
);
776 if (nla_put_string(skb
, CTRL_ATTR_FAMILY_NAME
, family
->name
) ||
777 nla_put_u16(skb
, CTRL_ATTR_FAMILY_ID
, family
->id
))
778 goto nla_put_failure
;
780 nla_grps
= nla_nest_start(skb
, CTRL_ATTR_MCAST_GROUPS
);
781 if (nla_grps
== NULL
)
782 goto nla_put_failure
;
784 nest
= nla_nest_start(skb
, 1);
786 goto nla_put_failure
;
788 if (nla_put_u32(skb
, CTRL_ATTR_MCAST_GRP_ID
, grp_id
) ||
789 nla_put_string(skb
, CTRL_ATTR_MCAST_GRP_NAME
,
791 goto nla_put_failure
;
793 nla_nest_end(skb
, nest
);
794 nla_nest_end(skb
, nla_grps
);
796 return genlmsg_end(skb
, hdr
);
799 genlmsg_cancel(skb
, hdr
);
803 static int ctrl_dumpfamily(struct sk_buff
*skb
, struct netlink_callback
*cb
)
807 struct genl_family
*rt
;
808 struct net
*net
= sock_net(skb
->sk
);
809 int chains_to_skip
= cb
->args
[0];
810 int fams_to_skip
= cb
->args
[1];
812 for (i
= chains_to_skip
; i
< GENL_FAM_TAB_SIZE
; i
++) {
814 list_for_each_entry(rt
, genl_family_chain(i
), family_list
) {
815 if (!rt
->netnsok
&& !net_eq(net
, &init_net
))
817 if (++n
< fams_to_skip
)
819 if (ctrl_fill_info(rt
, NETLINK_CB(cb
->skb
).portid
,
820 cb
->nlh
->nlmsg_seq
, NLM_F_MULTI
,
821 skb
, CTRL_CMD_NEWFAMILY
) < 0)
835 static struct sk_buff
*ctrl_build_family_msg(struct genl_family
*family
,
836 u32 portid
, int seq
, u8 cmd
)
841 skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
843 return ERR_PTR(-ENOBUFS
);
845 err
= ctrl_fill_info(family
, portid
, seq
, 0, skb
, cmd
);
854 static struct sk_buff
*
855 ctrl_build_mcgrp_msg(struct genl_family
*family
,
856 const struct genl_multicast_group
*grp
,
857 int grp_id
, u32 portid
, int seq
, u8 cmd
)
862 skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
864 return ERR_PTR(-ENOBUFS
);
866 err
= ctrl_fill_mcgrp_info(family
, grp
, grp_id
, portid
,
876 static const struct nla_policy ctrl_policy
[CTRL_ATTR_MAX
+1] = {
877 [CTRL_ATTR_FAMILY_ID
] = { .type
= NLA_U16
},
878 [CTRL_ATTR_FAMILY_NAME
] = { .type
= NLA_NUL_STRING
,
879 .len
= GENL_NAMSIZ
- 1 },
882 static int ctrl_getfamily(struct sk_buff
*skb
, struct genl_info
*info
)
885 struct genl_family
*res
= NULL
;
888 if (info
->attrs
[CTRL_ATTR_FAMILY_ID
]) {
889 u16 id
= nla_get_u16(info
->attrs
[CTRL_ATTR_FAMILY_ID
]);
890 res
= genl_family_find_byid(id
);
894 if (info
->attrs
[CTRL_ATTR_FAMILY_NAME
]) {
897 name
= nla_data(info
->attrs
[CTRL_ATTR_FAMILY_NAME
]);
898 res
= genl_family_find_byname(name
);
899 #ifdef CONFIG_MODULES
903 request_module("net-pf-%d-proto-%d-family-%s",
904 PF_NETLINK
, NETLINK_GENERIC
, name
);
907 res
= genl_family_find_byname(name
);
916 if (!res
->netnsok
&& !net_eq(genl_info_net(info
), &init_net
)) {
917 /* family doesn't exist here */
921 msg
= ctrl_build_family_msg(res
, info
->snd_portid
, info
->snd_seq
,
926 return genlmsg_reply(msg
, info
);
929 static int genl_ctrl_event(int event
, struct genl_family
*family
,
930 const struct genl_multicast_group
*grp
,
935 /* genl is still initialising */
936 if (!init_net
.genl_sock
)
940 case CTRL_CMD_NEWFAMILY
:
941 case CTRL_CMD_DELFAMILY
:
943 msg
= ctrl_build_family_msg(family
, 0, 0, event
);
945 case CTRL_CMD_NEWMCAST_GRP
:
946 case CTRL_CMD_DELMCAST_GRP
:
948 msg
= ctrl_build_mcgrp_msg(family
, grp
, grp_id
, 0, 0, event
);
957 if (!family
->netnsok
) {
958 genlmsg_multicast_netns(&genl_ctrl
, &init_net
, msg
, 0,
962 genlmsg_multicast_allns(&genl_ctrl
, msg
, 0,
970 static struct genl_ops genl_ctrl_ops
[] = {
972 .cmd
= CTRL_CMD_GETFAMILY
,
973 .doit
= ctrl_getfamily
,
974 .dumpit
= ctrl_dumpfamily
,
975 .policy
= ctrl_policy
,
979 static struct genl_multicast_group genl_ctrl_groups
[] = {
980 { .name
= "notify", },
983 static int __net_init
genl_pernet_init(struct net
*net
)
985 struct netlink_kernel_cfg cfg
= {
987 .flags
= NL_CFG_F_NONROOT_RECV
,
990 /* we'll bump the group number right afterwards */
991 net
->genl_sock
= netlink_kernel_create(net
, NETLINK_GENERIC
, &cfg
);
993 if (!net
->genl_sock
&& net_eq(net
, &init_net
))
994 panic("GENL: Cannot initialize generic netlink\n");
1002 static void __net_exit
genl_pernet_exit(struct net
*net
)
1004 netlink_kernel_release(net
->genl_sock
);
1005 net
->genl_sock
= NULL
;
1008 static struct pernet_operations genl_pernet_ops
= {
1009 .init
= genl_pernet_init
,
1010 .exit
= genl_pernet_exit
,
1013 static int __init
genl_init(void)
1017 for (i
= 0; i
< GENL_FAM_TAB_SIZE
; i
++)
1018 INIT_LIST_HEAD(&family_ht
[i
]);
1020 err
= genl_register_family_with_ops_groups(&genl_ctrl
, genl_ctrl_ops
,
1025 err
= register_pernet_subsys(&genl_pernet_ops
);
1032 panic("GENL: Cannot register controller: %d\n", err
);
1035 subsys_initcall(genl_init
);
1037 static int genlmsg_mcast(struct sk_buff
*skb
, u32 portid
, unsigned long group
,
1040 struct sk_buff
*tmp
;
1041 struct net
*net
, *prev
= NULL
;
1044 for_each_net_rcu(net
) {
1046 tmp
= skb_clone(skb
, flags
);
1051 err
= nlmsg_multicast(prev
->genl_sock
, tmp
,
1052 portid
, group
, flags
);
1060 return nlmsg_multicast(prev
->genl_sock
, skb
, portid
, group
, flags
);
1066 int genlmsg_multicast_allns(struct genl_family
*family
, struct sk_buff
*skb
,
1067 u32 portid
, unsigned int group
, gfp_t flags
)
1069 if (WARN_ON_ONCE(group
>= family
->n_mcgrps
))
1071 group
= family
->mcgrp_offset
+ group
;
1072 return genlmsg_mcast(skb
, portid
, group
, flags
);
1074 EXPORT_SYMBOL(genlmsg_multicast_allns
);
1076 void genl_notify(struct genl_family
*family
,
1077 struct sk_buff
*skb
, struct net
*net
, u32 portid
, u32 group
,
1078 struct nlmsghdr
*nlh
, gfp_t flags
)
1080 struct sock
*sk
= net
->genl_sock
;
1084 report
= nlmsg_report(nlh
);
1086 if (WARN_ON_ONCE(group
>= family
->n_mcgrps
))
1088 group
= family
->mcgrp_offset
+ group
;
1089 nlmsg_notify(sk
, skb
, portid
, group
, report
, flags
);
1091 EXPORT_SYMBOL(genl_notify
);