Merge remote-tracking branch 'drm-panel/drm/panel/for-next'
[deliverable/linux.git] / drivers / net / ppp / pptp.c
1 /*
2 * Point-to-Point Tunneling Protocol for Linux
3 *
4 * Authors: Dmitry Kozlov <xeb@mail.ru>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13 #include <linux/string.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/errno.h>
18 #include <linux/netdevice.h>
19 #include <linux/net.h>
20 #include <linux/skbuff.h>
21 #include <linux/vmalloc.h>
22 #include <linux/init.h>
23 #include <linux/ppp_channel.h>
24 #include <linux/ppp_defs.h>
25 #include <linux/if_pppox.h>
26 #include <linux/ppp-ioctl.h>
27 #include <linux/notifier.h>
28 #include <linux/file.h>
29 #include <linux/in.h>
30 #include <linux/ip.h>
31 #include <linux/rcupdate.h>
32 #include <linux/spinlock.h>
33
34 #include <net/sock.h>
35 #include <net/protocol.h>
36 #include <net/ip.h>
37 #include <net/icmp.h>
38 #include <net/route.h>
39 #include <net/gre.h>
40 #include <net/pptp.h>
41
42 #include <linux/uaccess.h>
43
#define PPTP_DRIVER_VERSION "0.8.5"

#define MAX_CALLID 65535

/* One bit per possible local PPTP call id; a set bit marks an id in use. */
static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
/* call id -> bound socket.  Readers use RCU; writers hold chan_lock. */
static struct pppox_sock __rcu **callid_sock;

static DEFINE_SPINLOCK(chan_lock);

static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;
56
57 static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
58 {
59 struct pppox_sock *sock;
60 struct pptp_opt *opt;
61
62 rcu_read_lock();
63 sock = rcu_dereference(callid_sock[call_id]);
64 if (sock) {
65 opt = &sock->proto.pptp;
66 if (opt->dst_addr.sin_addr.s_addr != s_addr)
67 sock = NULL;
68 else
69 sock_hold(sk_pppox(sock));
70 }
71 rcu_read_unlock();
72
73 return sock;
74 }
75
76 static int lookup_chan_dst(u16 call_id, __be32 d_addr)
77 {
78 struct pppox_sock *sock;
79 struct pptp_opt *opt;
80 int i;
81
82 rcu_read_lock();
83 i = 1;
84 for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
85 sock = rcu_dereference(callid_sock[i]);
86 if (!sock)
87 continue;
88 opt = &sock->proto.pptp;
89 if (opt->dst_addr.call_id == call_id &&
90 opt->dst_addr.sin_addr.s_addr == d_addr)
91 break;
92 }
93 rcu_read_unlock();
94
95 return i < MAX_CALLID;
96 }
97
/* Bind @sock to a local call id, under chan_lock.
 *
 * If @sa->call_id is zero, allocate the next free id round-robin,
 * starting just after the last one handed out and wrapping back to 1
 * when the bitmap end is hit (id 0 is never allocated by this path).
 * Otherwise claim the requested id if it is still free.
 *
 * Returns 0 on success, -1 if no id is available or the requested one
 * is already taken.
 */
static int add_chan(struct pppox_sock *sock,
		    struct pptp_addr *sa)
{
	static int call_id;	/* last allocated id; protected by chan_lock */

	spin_lock(&chan_lock);
	if (!sa->call_id) {
		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
		if (call_id == MAX_CALLID) {
			/* ran off the end: retry from the start of the range */
			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
			if (call_id == MAX_CALLID)
				goto out_err;
		}
		sa->call_id = call_id;
	} else if (test_bit(sa->call_id, callid_bitmap)) {
		/* caller asked for a specific id that is already in use */
		goto out_err;
	}

	sock->proto.pptp.src_addr = *sa;
	set_bit(sa->call_id, callid_bitmap);
	rcu_assign_pointer(callid_sock[sa->call_id], sock);
	spin_unlock(&chan_lock);

	return 0;

out_err:
	spin_unlock(&chan_lock);
	return -1;
}
127
128 static void del_chan(struct pppox_sock *sock)
129 {
130 spin_lock(&chan_lock);
131 clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
132 RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
133 spin_unlock(&chan_lock);
134 synchronize_rcu();
135 }
136
/* ppp_channel start_xmit hook: encapsulate one PPP frame in a PPTP
 * GRE header plus an IPv4 header and transmit it to the peer.
 * Always returns 1 ("skb consumed"), freeing the skb on error paths.
 */
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct net *net = sock_net(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct pptp_gre_header *hdr;
	unsigned int header_len = sizeof(*hdr);
	struct flowi4 fl4;
	int islcp;
	int len;
	unsigned char *data;
	__u32 seq_recv;


	struct rtable *rt;
	struct net_device *tdev;
	struct iphdr *iph;
	int max_headroom;

	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
		goto tx_error;

	/* Route to the peer on every transmit; no dst is cached here. */
	rt = ip_route_output_ports(net, &fl4, NULL,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0, IPPROTO_GRE,
				   RT_TOS(0), 0);
	if (IS_ERR(rt))
		goto tx_error;

	tdev = rt->dst.dev;

	/* worst case: link header + IP header + full GRE header + the
	 * 2 address/control bytes we may push below
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	data = skb->data;
	/* LCP control frames (codes 1..7) must keep the uncompressed
	 * protocol field and address/control bytes.
	 * NOTE(review): data[0..2] are read without a length check --
	 * assumes the PPP core always hands us >= 3 bytes; verify.
	 */
	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field */
	if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
		skb_pull(skb, 1);

	/* Put in the address/control bytes if necessary */
	if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
		data = skb_push(skb, 2);
		data[0] = PPP_ALLSTATIONS;
		data[1] = PPP_UI;
	}

	len = skb->len;

	seq_recv = opt->seq_recv;

	/* nothing new to acknowledge: omit the optional GRE ack field */
	if (opt->ack_sent == seq_recv)
		header_len -= sizeof(hdr->ack);

	/* Push down and install GRE header */
	skb_push(skb, header_len);
	hdr = (struct pptp_gre_header *)(skb->data);

	hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
	hdr->gre_hd.protocol = GRE_PROTO_PPP;
	hdr->call_id = htons(opt->dst_addr.call_id);

	hdr->seq = htonl(++opt->seq_sent);
	if (opt->ack_sent != seq_recv) {
		/* send ack with this message */
		hdr->gre_hd.flags |= GRE_ACK;
		hdr->ack = htonl(seq_recv);
		opt->ack_sent = seq_recv;
	}
	/* payload length excludes the GRE header we just pushed */
	hdr->payload_len = htons(len);

	/* Push down and install the IP header. */

	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->protocol = IPPROTO_GRE;
	iph->tos = 0;
	/* source/destination as resolved by the route lookup above */
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->tot_len = htons(skb->len);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	nf_reset(skb);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	ip_local_out(net, skb->sk, skb);
	return 1;

tx_error:
	kfree_skb(skb);
	return 1;
}
260
/* Socket backlog receive: process one GRE/PPTP packet with the GRE
 * header at skb->data.  Handles the peer's ack bookkeeping, sequence
 * checking, PPP framing removal, and hands the payload to ppp_input().
 * Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
{
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	int headersize, payload_len, seq;
	__u8 *payload;
	struct pptp_gre_header *header;

	/* not connected yet: just queue for the control path */
	if (!(sk->sk_state & PPPOX_CONNECTED)) {
		if (sock_queue_rcv_skb(sk, skb))
			goto drop;
		return NET_RX_SUCCESS;
	}

	header = (struct pptp_gre_header *)(skb->data);
	headersize = sizeof(*header);

	/* test if acknowledgement present */
	if (GRE_IS_ACK(header->gre_hd.flags)) {
		__u32 ack;

		if (!pskb_may_pull(skb, headersize))
			goto drop;
		/* pskb_may_pull() may have reallocated the head */
		header = (struct pptp_gre_header *)(skb->data);

		/* ack in different place if S = 0 */
		ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;

		ack = ntohl(ack);

		if (ack > opt->ack_recv)
			opt->ack_recv = ack;
		/* also handle sequence number wrap-around */
		if (WRAPPED(ack, opt->ack_recv))
			opt->ack_recv = ack;
	} else {
		/* no ack field on the wire: header is 4 bytes shorter */
		headersize -= sizeof(header->ack);
	}
	/* test if payload present */
	if (!GRE_IS_SEQ(header->gre_hd.flags))
		goto drop;

	payload_len = ntohs(header->payload_len);
	seq = ntohl(header->seq);

	/* check for incomplete packet (length smaller than expected) */
	if (!pskb_may_pull(skb, headersize + payload_len))
		goto drop;

	payload = skb->data + headersize;
	/* check for expected sequence number */
	if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
		/* out-of-order or duplicate: still let LCP echo
		 * request/reply through so keepalives survive reordering.
		 * NOTE(review): payload[4] is read without checking
		 * payload_len >= 5 -- possible over-read on a malformed
		 * short LCP frame; verify.
		 */
		if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
				(PPP_PROTOCOL(payload) == PPP_LCP) &&
				((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
			goto allow_packet;
	} else {
		opt->seq_recv = seq;
allow_packet:
		skb_pull(skb, headersize);

		if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
			/* chop off address/control */
			if (skb->len < 3)
				goto drop;
			skb_pull(skb, 2);
		}

		if ((*skb->data) & 1) {
			/* protocol is compressed */
			skb_push(skb, 1)[0] = 0;
		}

		skb->ip_summed = CHECKSUM_NONE;
		skb_set_network_header(skb, skb->head-skb->data);
		ppp_input(&po->chan, skb);

		return NET_RX_SUCCESS;
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
344
345 static int pptp_rcv(struct sk_buff *skb)
346 {
347 struct pppox_sock *po;
348 struct pptp_gre_header *header;
349 struct iphdr *iph;
350
351 if (skb->pkt_type != PACKET_HOST)
352 goto drop;
353
354 if (!pskb_may_pull(skb, 12))
355 goto drop;
356
357 iph = ip_hdr(skb);
358
359 header = (struct pptp_gre_header *)skb->data;
360
361 if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
362 GRE_IS_CSUM(header->gre_hd.flags) || /* flag CSUM should be clear */
363 GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */
364 !GRE_IS_KEY(header->gre_hd.flags) || /* flag KEY should be set */
365 (header->gre_hd.flags & GRE_FLAGS)) /* flag Recursion Ctrl should be clear */
366 /* if invalid, discard this packet */
367 goto drop;
368
369 po = lookup_chan(htons(header->call_id), iph->saddr);
370 if (po) {
371 skb_dst_drop(skb);
372 nf_reset(skb);
373 return sk_receive_skb(sk_pppox(po), skb, 0);
374 }
375 drop:
376 kfree_skb(skb);
377 return NET_RX_DROP;
378 }
379
380 static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
381 int sockaddr_len)
382 {
383 struct sock *sk = sock->sk;
384 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
385 struct pppox_sock *po = pppox_sk(sk);
386 int error = 0;
387
388 if (sockaddr_len < sizeof(struct sockaddr_pppox))
389 return -EINVAL;
390
391 lock_sock(sk);
392
393 if (sk->sk_state & PPPOX_DEAD) {
394 error = -EALREADY;
395 goto out;
396 }
397
398 if (sk->sk_state & PPPOX_BOUND) {
399 error = -EBUSY;
400 goto out;
401 }
402
403 if (add_chan(po, &sp->sa_addr.pptp))
404 error = -EBUSY;
405 else
406 sk->sk_state |= PPPOX_BOUND;
407
408 out:
409 release_sock(sk);
410 return error;
411 }
412
/* Connect a bound socket to a remote <address, call id> and register
 * the PPP channel with the generic PPP layer.
 */
static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct rtable *rt;
	struct flowi4 fl4;
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	/* refuse a second channel to the same remote call */
	if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
		return -EALREADY;

	lock_sock(sk);
	/* Check for already bound sockets */
	if (sk->sk_state & PPPOX_CONNECTED) {
		error = -EBUSY;
		goto end;
	}

	/* Check for already disconnected sockets, on attempts to disconnect */
	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto end;
	}

	/* must have been bind()ed to a local address first */
	if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
		error = -EINVAL;
		goto end;
	}

	po->chan.private = sk;
	po->chan.ops = &pptp_chan_ops;

	/* NOTE(review): this routes toward opt->dst_addr, which is only
	 * assigned from the new sockaddr at the end of this function, so
	 * on a first connect the MTU below is derived from the previous
	 * (zero) destination -- confirm this is intended.
	 */
	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0,
				   IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
	if (IS_ERR(rt)) {
		error = -EHOSTUNREACH;
		goto end;
	}
	sk_setup_caps(sk, &rt->dst);

	po->chan.mtu = dst_mtu(&rt->dst);
	if (!po->chan.mtu)
		po->chan.mtu = PPP_MRU;
	ip_rt_put(rt);
	/* leave room for our GRE + IP encapsulation */
	po->chan.mtu -= PPTP_HEADER_OVERHEAD;

	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
	error = ppp_register_channel(&po->chan);
	if (error) {
		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
		goto end;
	}

	opt->dst_addr = sp->sa_addr.pptp;
	sk->sk_state |= PPPOX_CONNECTED;

end:
	release_sock(sk);
	return error;
}
485
486 static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
487 int *usockaddr_len, int peer)
488 {
489 int len = sizeof(struct sockaddr_pppox);
490 struct sockaddr_pppox sp;
491
492 memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
493
494 sp.sa_family = AF_PPPOX;
495 sp.sa_protocol = PX_PROTO_PPTP;
496 sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
497
498 memcpy(uaddr, &sp, len);
499
500 *usockaddr_len = len;
501
502 return 0;
503 }
504
505 static int pptp_release(struct socket *sock)
506 {
507 struct sock *sk = sock->sk;
508 struct pppox_sock *po;
509 struct pptp_opt *opt;
510 int error = 0;
511
512 if (!sk)
513 return 0;
514
515 lock_sock(sk);
516
517 if (sock_flag(sk, SOCK_DEAD)) {
518 release_sock(sk);
519 return -EBADF;
520 }
521
522 po = pppox_sk(sk);
523 opt = &po->proto.pptp;
524 del_chan(po);
525
526 pppox_unbind_sock(sk);
527 sk->sk_state = PPPOX_DEAD;
528
529 sock_orphan(sk);
530 sock->sk = NULL;
531
532 release_sock(sk);
533 sock_put(sk);
534
535 return error;
536 }
537
538 static void pptp_sock_destruct(struct sock *sk)
539 {
540 if (!(sk->sk_state & PPPOX_DEAD)) {
541 del_chan(pppox_sk(sk));
542 pppox_unbind_sock(sk);
543 }
544 skb_queue_purge(&sk->sk_receive_queue);
545 }
546
547 static int pptp_create(struct net *net, struct socket *sock, int kern)
548 {
549 int error = -ENOMEM;
550 struct sock *sk;
551 struct pppox_sock *po;
552 struct pptp_opt *opt;
553
554 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
555 if (!sk)
556 goto out;
557
558 sock_init_data(sock, sk);
559
560 sock->state = SS_UNCONNECTED;
561 sock->ops = &pptp_ops;
562
563 sk->sk_backlog_rcv = pptp_rcv_core;
564 sk->sk_state = PPPOX_NONE;
565 sk->sk_type = SOCK_STREAM;
566 sk->sk_family = PF_PPPOX;
567 sk->sk_protocol = PX_PROTO_PPTP;
568 sk->sk_destruct = pptp_sock_destruct;
569
570 po = pppox_sk(sk);
571 opt = &po->proto.pptp;
572
573 opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
574 opt->ack_recv = 0; opt->ack_sent = 0xffffffff;
575
576 error = 0;
577 out:
578 return error;
579 }
580
581 static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
582 unsigned long arg)
583 {
584 struct sock *sk = (struct sock *) chan->private;
585 struct pppox_sock *po = pppox_sk(sk);
586 struct pptp_opt *opt = &po->proto.pptp;
587 void __user *argp = (void __user *)arg;
588 int __user *p = argp;
589 int err, val;
590
591 err = -EFAULT;
592 switch (cmd) {
593 case PPPIOCGFLAGS:
594 val = opt->ppp_flags;
595 if (put_user(val, p))
596 break;
597 err = 0;
598 break;
599 case PPPIOCSFLAGS:
600 if (get_user(val, p))
601 break;
602 opt->ppp_flags = val & ~SC_RCV_BITS;
603 err = 0;
604 break;
605 default:
606 err = -ENOTTY;
607 }
608
609 return err;
610 }
611
/* Callbacks used by the generic PPP layer for this channel type. */
static const struct ppp_channel_ops pptp_chan_ops = {
	.start_xmit = pptp_xmit,
	.ioctl = pptp_ppp_ioctl,
};

static struct proto pptp_sk_proto __read_mostly = {
	.name = "PPTP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

/* Socket-level operations; anything not meaningful for PPTP control
 * sockets is a sock_no_* stub.
 */
static const struct proto_ops pptp_ops = {
	.family = AF_PPPOX,
	.owner = THIS_MODULE,
	.release = pptp_release,
	.bind = pptp_bind,
	.connect = pptp_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = pptp_getname,
	.poll = sock_no_poll,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = sock_no_sendmsg,
	.recvmsg = sock_no_recvmsg,
	.mmap = sock_no_mmap,
	.ioctl = pppox_ioctl,
};

/* Creation hook registered with the pppox layer for PX_PROTO_PPTP. */
static const struct pppox_proto pppox_pptp_proto = {
	.create = pptp_create,
	.owner = THIS_MODULE,
};

/* Receive handler registered with the GRE demultiplexer. */
static const struct gre_protocol gre_pptp_protocol = {
	.handler = pptp_rcv,
};
651
652 static int __init pptp_init_module(void)
653 {
654 int err = 0;
655 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
656
657 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
658 if (!callid_sock)
659 return -ENOMEM;
660
661 err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
662 if (err) {
663 pr_err("PPTP: can't add gre protocol\n");
664 goto out_mem_free;
665 }
666
667 err = proto_register(&pptp_sk_proto, 0);
668 if (err) {
669 pr_err("PPTP: can't register sk_proto\n");
670 goto out_gre_del_protocol;
671 }
672
673 err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
674 if (err) {
675 pr_err("PPTP: can't register pppox_proto\n");
676 goto out_unregister_sk_proto;
677 }
678
679 return 0;
680
681 out_unregister_sk_proto:
682 proto_unregister(&pptp_sk_proto);
683 out_gre_del_protocol:
684 gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
685 out_mem_free:
686 vfree(callid_sock);
687
688 return err;
689 }
690
/* Module exit: undo pptp_init_module() registrations in reverse order,
 * then free the call-id table.
 */
static void __exit pptp_exit_module(void)
{
	unregister_pppox_proto(PX_PROTO_PPTP);
	proto_unregister(&pptp_sk_proto);
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	vfree(callid_sock);
}
698
699 module_init(pptp_init_module);
700 module_exit(pptp_exit_module);
701
702 MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
703 MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
704 MODULE_LICENSE("GPL");
705 MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);
This page took 0.07693 seconds and 6 git commands to generate.