/* net/sctp/sctp_diag.c */
#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                               void *info);

/* define some functions to make asoc/ep fill look clean */
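/* Fill the generic inet_diag_msg header from an SCTP association: address
 * family, ports (network byte order), the first bound local address and the
 * primary path's peer address, the ifindex of the primary path's cached dst,
 * the socket cookie and the association state. When the primary path's
 * T3-RTX timer is pending, also report the data chunk retransmit count and
 * the remaining expiry time in milliseconds.
 */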
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
                                        struct sock *sk,
                                        struct sctp_association *asoc)
{
        union sctp_addr laddr, paddr;
        struct dst_entry *dst;
        struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;

        laddr = list_entry(asoc->base.bind_addr.address_list.next,
                           struct sctp_sockaddr_entry, list)->a;
        paddr = asoc->peer.primary_path->ipaddr;
        dst = asoc->peer.primary_path->dst;

        r->idiag_family = sk->sk_family;
        r->id.idiag_sport = htons(asoc->base.bind_addr.port);
        r->id.idiag_dport = htons(asoc->peer.port);
        r->id.idiag_if = dst ? dst->dev->ifindex : 0;
        sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6) {
                *(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
                *(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
        } else
#endif
        {
                memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
                memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

                r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
                r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
        }

        r->idiag_state = asoc->state;
        if (timer_pending(t3_rtx)) {
                r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
                r->idiag_retrans = asoc->rtx_data_chunks;
                r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
        } else {
                r->idiag_timer = 0;
                r->idiag_retrans = 0;
                r->idiag_expires = 0;
        }
}

static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
                                         struct list_head *address_list)
{
        struct sctp_sockaddr_entry *laddr;
        int addrlen = sizeof(struct sockaddr_storage);
        int addrcnt = 0;
        struct nlattr *attr;
        void *info = NULL;

        list_for_each_entry_rcu(laddr, address_list, list)
                addrcnt++;

        attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
        if (!attr)
                return -EMSGSIZE;

        info = nla_data(attr);
        list_for_each_entry_rcu(laddr, address_list, list) {
                memcpy(info, &laddr->a, addrlen);
                info += addrlen;
        }

        return 0;
}

static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
                                        struct sctp_association *asoc)
{
        int addrlen = sizeof(struct sockaddr_storage);
        struct sctp_transport *from;
        struct nlattr *attr;
        void *info = NULL;

        attr = nla_reserve(skb, INET_DIAG_PEERS,
                           addrlen * asoc->peer.transport_count);
        if (!attr)
                return -EMSGSIZE;

        info = nla_data(attr);
        list_for_each_entry(from, &asoc->peer.transport_addr_list,
                            transports) {
                memcpy(info, &from->ipaddr, addrlen);
                info += addrlen;
        }

        return 0;
}

/* sctp asoc/ep fill */
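/* Build one netlink message for either an endpoint (asoc == NULL) or an
 * association. It always emits the inet_diag_msg header plus the local
 * address list (INET_DIAG_LOCALS); depending on the requested extensions it
 * also emits socket memory info (INET_DIAG_SKMEMINFO), struct sctp_info
 * (INET_DIAG_INFO), a fixed "reno" congestion string (INET_DIAG_CONG) and,
 * for associations, the peer address list (INET_DIAG_PEERS).
 */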
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
                               struct sk_buff *skb,
                               const struct inet_diag_req_v2 *req,
                               struct user_namespace *user_ns,
                               int portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
{
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct list_head *addr_list;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
        int ext = req->idiag_ext;
        struct sctp_infox infox;
        void *info = NULL;

        nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;

        r = nlmsg_data(nlh);
        BUG_ON(!sk_fullsock(sk));

        if (asoc) {
                inet_diag_msg_sctpasoc_fill(r, sk, asoc);
        } else {
                inet_diag_msg_common_fill(r, sk);
                r->idiag_state = sk->sk_state;
                r->idiag_timer = 0;
                r->idiag_retrans = 0;
        }

        if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
                goto errout;

        if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
                u32 mem[SK_MEMINFO_VARS];
                int amt;

                if (asoc && asoc->ep->sndbuf_policy)
                        amt = asoc->sndbuf_used;
                else
                        amt = sk_wmem_alloc_get(sk);
                mem[SK_MEMINFO_WMEM_ALLOC] = amt;
                if (asoc && asoc->ep->rcvbuf_policy)
                        amt = atomic_read(&asoc->rmem_alloc);
                else
                        amt = sk_rmem_alloc_get(sk);
                mem[SK_MEMINFO_RMEM_ALLOC] = amt;
                mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
                mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
                mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
                mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
                mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
                mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
                mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

                if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
                        goto errout;
        }

        if (ext & (1 << (INET_DIAG_INFO - 1))) {
                struct nlattr *attr;

                attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
                                         sizeof(struct sctp_info),
                                         INET_DIAG_PAD);
                if (!attr)
                        goto errout;

                info = nla_data(attr);
        }
        infox.sctpinfo = (struct sctp_info *)info;
        infox.asoc = asoc;
        sctp_diag_get_info(sk, r, &infox);

        addr_list = asoc ? &asoc->base.bind_addr.address_list
                         : &ep->base.bind_addr.address_list;
        if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
                goto errout;

        if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
                if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
                        goto errout;

        if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
                goto errout;

        nlmsg_end(skb, nlh);
        return 0;

errout:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
        struct sk_buff *skb;
        struct netlink_callback *cb;
        const struct inet_diag_req_v2 *r;
        const struct nlmsghdr *nlh;
};

static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
        int addrlen = sizeof(struct sockaddr_storage);
        int addrcnt = 0;
        struct sctp_sockaddr_entry *laddr;

        list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
                                list)
                addrcnt++;

        return nla_total_size(sizeof(struct sctp_info))
               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
               + nla_total_size(1) /* INET_DIAG_TOS */
               + nla_total_size(1) /* INET_DIAG_TCLASS */
               + nla_total_size(addrlen * asoc->peer.transport_count)
               + nla_total_size(addrlen * addrcnt)
               + nla_total_size(sizeof(struct inet_diag_meminfo))
               + nla_total_size(sizeof(struct inet_diag_msg))
               + 64;
}

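/* Answer a "dump one" request for a single association: check the socket
 * cookie, allocate the reply skb sized by inet_assoc_attr_size(), fill it
 * for this association (re-taking the lock on the right socket if the
 * association has moved to another one in the meantime), and unicast the
 * reply back to the requester.
 */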
static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
{
        struct sctp_association *assoc = tsp->asoc;
        struct sock *sk = tsp->asoc->base.sk;
        struct sctp_comm_param *commp = p;
        struct sk_buff *in_skb = commp->skb;
        const struct inet_diag_req_v2 *req = commp->r;
        const struct nlmsghdr *nlh = commp->nlh;
        struct net *net = sock_net(in_skb->sk);
        struct sk_buff *rep;
        int err;

        err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
        if (err)
                goto out;

        err = -ENOMEM;
        rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
        if (!rep)
                goto out;

        lock_sock(sk);
        if (sk != assoc->base.sk) {
                release_sock(sk);
                sk = assoc->base.sk;
                lock_sock(sk);
        }
        err = inet_sctp_diag_fill(sk, assoc, rep, req,
                                  sk_user_ns(NETLINK_CB(in_skb).sk),
                                  NETLINK_CB(in_skb).portid,
                                  nlh->nlmsg_seq, 0, nlh);
        release_sock(sk);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(rep);
                goto out;
        }

        err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
out:
        return err;
}

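/* Dump callback for the transport hashtable walk. Each endpoint is visited
 * only once (via the transport that belongs to its first association), its
 * associations are filtered by family and ports, and the endpoint record is
 * emitted once followed by one record per association. err is set to 2
 * (non-zero) when the dump skb fills up so the walk can stop and resume
 * later from the positions recorded in cb->args[].
 */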
static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
{
        struct sctp_endpoint *ep = tsp->asoc->ep;
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
        const struct inet_diag_req_v2 *r = commp->r;
        struct sctp_association *assoc =
                list_entry(ep->asocs.next, struct sctp_association, asocs);
        int err = 0;

        /* visit each ep only once while walking the transports */
        if (tsp->asoc != assoc)
                goto out;

        if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
                goto out;

        lock_sock(sk);
        if (sk != assoc->base.sk)
                goto release;
        list_for_each_entry(assoc, &ep->asocs, asocs) {
                if (cb->args[4] < cb->args[1])
                        goto next;

                if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
                    r->id.idiag_sport)
                        goto next;
                if (r->id.idiag_dport != htons(assoc->peer.port) &&
                    r->id.idiag_dport)
                        goto next;

                if (!cb->args[3] &&
                    inet_sctp_diag_fill(sk, NULL, skb, r,
                                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq,
                                        NLM_F_MULTI, cb->nlh) < 0) {
                        cb->args[3] = 1;
                        err = 2;
                        goto release;
                }
                cb->args[3] = 1;

                if (inet_sctp_diag_fill(sk, assoc, skb, r,
                                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
                        err = 2;
                        goto release;
                }
next:
                cb->args[4]++;
        }
        cb->args[1] = 0;
        cb->args[2]++;
        cb->args[3] = 0;
        cb->args[4] = 0;
release:
        release_sock(sk);
        return err;
out:
        cb->args[2]++;
        return err;
}

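/* Dump callback for the endpoint hashtable walk (the listening-socket pass
 * of sctp_diag_dump): match the request's namespace, family and ports, then
 * emit an endpoint-only record via inet_sctp_diag_fill(). cb->args[1]/[4]
 * record the position so an interrupted dump can resume where it left off.
 */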
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
        const struct inet_diag_req_v2 *r = commp->r;
        struct net *net = sock_net(skb->sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;

        if (!net_eq(sock_net(sk), net))
                goto out;

        if (cb->args[4] < cb->args[1])
                goto next;

        if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
                goto next;

        if (r->sdiag_family != AF_UNSPEC &&
            sk->sk_family != r->sdiag_family)
                goto next;

        if (r->id.idiag_sport != inet->inet_sport &&
            r->id.idiag_sport)
                goto next;

        if (r->id.idiag_dport != inet->inet_dport &&
            r->id.idiag_dport)
                goto next;

        if (inet_sctp_diag_fill(sk, NULL, skb, r,
                                sk_user_ns(NETLINK_CB(cb->skb).sk),
                                NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                cb->nlh) < 0) {
                err = 2;
                goto out;
        }
next:
        cb->args[4]++;
out:
        return err;
}

/* define the functions for sctp_diag_handler */
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                               void *info)
{
        struct sctp_infox *infox = (struct sctp_infox *)info;

        if (infox->asoc) {
                r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
                r->idiag_wqueue = infox->asoc->sndbuf_used;
        } else {
                r->idiag_rqueue = sk->sk_ack_backlog;
                r->idiag_wqueue = sk->sk_max_ack_backlog;
        }
        if (infox->sctpinfo)
                sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}

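/* Handle a dump_one (exact socket) request: rebuild the local and peer
 * addresses from the inet_diag request and let
 * sctp_transport_lookup_process() find the matching transport and run
 * sctp_tsp_dump_one() on it.
 */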
static int sctp_diag_dump_one(struct sk_buff *in_skb,
                              const struct nlmsghdr *nlh,
                              const struct inet_diag_req_v2 *req)
{
        struct net *net = sock_net(in_skb->sk);
        union sctp_addr laddr, paddr;
        struct sctp_comm_param commp = {
                .skb = in_skb,
                .r = req,
                .nlh = nlh,
        };

        if (req->sdiag_family == AF_INET) {
                laddr.v4.sin_port = req->id.idiag_sport;
                laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
                laddr.v4.sin_family = AF_INET;

                paddr.v4.sin_port = req->id.idiag_dport;
                paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
                paddr.v4.sin_family = AF_INET;
        } else {
                laddr.v6.sin6_port = req->id.idiag_sport;
                memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
                       sizeof(laddr.v6.sin6_addr));
                laddr.v6.sin6_family = AF_INET6;

                paddr.v6.sin6_port = req->id.idiag_dport;
                memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
                       sizeof(paddr.v6.sin6_addr));
                paddr.v6.sin6_family = AF_INET6;
        }

        return sctp_transport_lookup_process(sctp_tsp_dump_one,
                                             net, &laddr, &paddr, &commp);
}

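/* Two-pass dump: first walk the endpoint hashtable for listening sockets
 * (when TCPF_LISTEN is requested), then walk the transport hashtable for
 * the established associations. cb->args[] carries the resume state between
 * invocations; see the comments below for what each slot means.
 */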
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                           const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
        u32 idiag_states = r->idiag_states;
        struct net *net = sock_net(skb->sk);
        struct sctp_comm_param commp = {
                .skb = skb,
                .cb = cb,
                .r = r,
        };

        /* eps hashtable dump
         * args:
         * 0 : whether the listening sockets have already been traversed
         * 1 : the sock position to resume this traversal from
         * 4 : temporary cursor while traversing the list
         */
        if (cb->args[0] == 0) {
                if (!(idiag_states & TCPF_LISTEN))
                        goto skip;
                if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
                        goto done;
skip:
                cb->args[0] = 1;
                cb->args[1] = 0;
                cb->args[4] = 0;
        }

        /* asocs by transport hashtable dump
         * args:
         * 1 : the assoc position to resume this traversal from
         * 2 : the transport position to resume this traversal from
         * 3 : whether the ep info of the current asoc has been dumped
         * 4 : temporary cursor while traversing the list
         */
        if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
                goto done;
        sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
done:
        cb->args[1] = cb->args[4];
        cb->args[4] = 0;
}

static const struct inet_diag_handler sctp_diag_handler = {
        .dump = sctp_diag_dump,
        .dump_one = sctp_diag_dump_one,
        .idiag_get_info = sctp_diag_get_info,
        .idiag_type = IPPROTO_SCTP,
        .idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
        return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
        inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);