/*
 * netlink diag: socket diagnostics for AF_NETLINK, including RX/TX
 * mmap-ring configuration reporting.
 * net/netlink/diag.c
 */
1 #include <linux/module.h>
2
3 #include <net/sock.h>
4 #include <linux/netlink.h>
5 #include <linux/sock_diag.h>
6 #include <linux/netlink_diag.h>
7
8 #include "af_netlink.h"
9
10 static int sk_diag_put_ring(struct netlink_ring *ring, int nl_type,
11 struct sk_buff *nlskb)
12 {
13 struct netlink_diag_ring ndr;
14
15 ndr.ndr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
16 ndr.ndr_block_nr = ring->pg_vec_len;
17 ndr.ndr_frame_size = ring->frame_size;
18 ndr.ndr_frame_nr = ring->frame_max + 1;
19
20 return nla_put(nlskb, nl_type, sizeof(ndr), &ndr);
21 }
22
23 static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb)
24 {
25 struct netlink_sock *nlk = nlk_sk(sk);
26 int ret;
27
28 mutex_lock(&nlk->pg_vec_lock);
29 ret = sk_diag_put_ring(&nlk->rx_ring, NETLINK_DIAG_RX_RING, nlskb);
30 if (!ret)
31 ret = sk_diag_put_ring(&nlk->tx_ring, NETLINK_DIAG_TX_RING,
32 nlskb);
33 mutex_unlock(&nlk->pg_vec_lock);
34
35 return ret;
36 }
37
38 static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
39 {
40 struct netlink_sock *nlk = nlk_sk(sk);
41
42 if (nlk->groups == NULL)
43 return 0;
44
45 return nla_put(nlskb, NETLINK_DIAG_GROUPS, NLGRPSZ(nlk->ngroups),
46 nlk->groups);
47 }
48
49 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
50 struct netlink_diag_req *req,
51 u32 portid, u32 seq, u32 flags, int sk_ino)
52 {
53 struct nlmsghdr *nlh;
54 struct netlink_diag_msg *rep;
55 struct netlink_sock *nlk = nlk_sk(sk);
56
57 nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
58 flags);
59 if (!nlh)
60 return -EMSGSIZE;
61
62 rep = nlmsg_data(nlh);
63 rep->ndiag_family = AF_NETLINK;
64 rep->ndiag_type = sk->sk_type;
65 rep->ndiag_protocol = sk->sk_protocol;
66 rep->ndiag_state = sk->sk_state;
67
68 rep->ndiag_ino = sk_ino;
69 rep->ndiag_portid = nlk->portid;
70 rep->ndiag_dst_portid = nlk->dst_portid;
71 rep->ndiag_dst_group = nlk->dst_group;
72 sock_diag_save_cookie(sk, rep->ndiag_cookie);
73
74 if ((req->ndiag_show & NDIAG_SHOW_GROUPS) &&
75 sk_diag_dump_groups(sk, skb))
76 goto out_nlmsg_trim;
77
78 if ((req->ndiag_show & NDIAG_SHOW_MEMINFO) &&
79 sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
80 goto out_nlmsg_trim;
81
82 if ((req->ndiag_show & NDIAG_SHOW_RING_CFG) &&
83 sk_diag_put_rings_cfg(sk, skb))
84 goto out_nlmsg_trim;
85
86 return nlmsg_end(skb, nlh);
87
88 out_nlmsg_trim:
89 nlmsg_cancel(skb, nlh);
90 return -EMSGSIZE;
91 }
92
93 static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
94 int protocol, int s_num)
95 {
96 struct netlink_table *tbl = &nl_table[protocol];
97 struct nl_portid_hash *hash = &tbl->hash;
98 struct net *net = sock_net(skb->sk);
99 struct netlink_diag_req *req;
100 struct sock *sk;
101 int ret = 0, num = 0, i;
102
103 req = nlmsg_data(cb->nlh);
104
105 for (i = 0; i <= hash->mask; i++) {
106 sk_for_each(sk, &hash->table[i]) {
107 if (!net_eq(sock_net(sk), net))
108 continue;
109 if (num < s_num) {
110 num++;
111 continue;
112 }
113
114 if (sk_diag_fill(sk, skb, req,
115 NETLINK_CB(cb->skb).portid,
116 cb->nlh->nlmsg_seq,
117 NLM_F_MULTI,
118 sock_i_ino(sk)) < 0) {
119 ret = 1;
120 goto done;
121 }
122
123 num++;
124 }
125 }
126
127 sk_for_each_bound(sk, &tbl->mc_list) {
128 if (sk_hashed(sk))
129 continue;
130 if (!net_eq(sock_net(sk), net))
131 continue;
132 if (num < s_num) {
133 num++;
134 continue;
135 }
136
137 if (sk_diag_fill(sk, skb, req,
138 NETLINK_CB(cb->skb).portid,
139 cb->nlh->nlmsg_seq,
140 NLM_F_MULTI,
141 sock_i_ino(sk)) < 0) {
142 ret = 1;
143 goto done;
144 }
145 num++;
146 }
147 done:
148 cb->args[0] = num;
149 cb->args[1] = protocol;
150
151 return ret;
152 }
153
154 static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
155 {
156 struct netlink_diag_req *req;
157 int s_num = cb->args[0];
158
159 req = nlmsg_data(cb->nlh);
160
161 read_lock(&nl_table_lock);
162
163 if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
164 int i;
165
166 for (i = cb->args[1]; i < MAX_LINKS; i++) {
167 if (__netlink_diag_dump(skb, cb, i, s_num))
168 break;
169 s_num = 0;
170 }
171 } else {
172 if (req->sdiag_protocol >= MAX_LINKS) {
173 read_unlock(&nl_table_lock);
174 return -ENOENT;
175 }
176
177 __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
178 }
179
180 read_unlock(&nl_table_lock);
181
182 return skb->len;
183 }
184
185 static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
186 {
187 int hdrlen = sizeof(struct netlink_diag_req);
188 struct net *net = sock_net(skb->sk);
189
190 if (nlmsg_len(h) < hdrlen)
191 return -EINVAL;
192
193 if (h->nlmsg_flags & NLM_F_DUMP) {
194 struct netlink_dump_control c = {
195 .dump = netlink_diag_dump,
196 };
197 return netlink_dump_start(net->diag_nlsk, skb, h, &c);
198 } else
199 return -EOPNOTSUPP;
200 }
201
/* Registration record tying AF_NETLINK sock_diag requests to our dump. */
static const struct sock_diag_handler netlink_diag_handler = {
	.family = AF_NETLINK,
	.dump = netlink_diag_handler_dump,
};
206
/* Module init: hook AF_NETLINK into the sock_diag subsystem. */
static int __init netlink_diag_init(void)
{
	return sock_diag_register(&netlink_diag_handler);
}
211
/* Module exit: detach from the sock_diag subsystem. */
static void __exit netlink_diag_exit(void)
{
	sock_diag_unregister(&netlink_diag_handler);
}
216
module_init(netlink_diag_init);
module_exit(netlink_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module when a sock_diag request arrives for AF_NETLINK. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */);