/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines. It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames. It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/uaccess.h>

#define PPP_VERSION "2.4.2"

#define OBUFSIZE 256

/* Structure for storing local state. */
struct asyncppp {
        struct tty_struct *tty;
        unsigned int flags;
        unsigned int state;
        unsigned int rbits;
        int mru;
        spinlock_t xmit_lock;
        spinlock_t recv_lock;
        unsigned long xmit_flags;
        u32 xaccm[8];
        u32 raccm;
        unsigned int bytes_sent;
        unsigned int bytes_rcvd;

        struct sk_buff *tpkt;
        int tpkt_pos;
        u16 tfcs;
        unsigned char *optr;
        unsigned char *olim;
        unsigned long last_xmit;

        struct sk_buff *rpkt;
        int lcp_fcs;
        struct sk_buff_head rqueue;

        struct tasklet_struct tsk;

        atomic_t refcnt;
        struct semaphore dead_sem;
        struct ppp_channel chan;        /* interface to generic ppp layer */
        unsigned char obuf[OBUFSIZE];
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP 0
#define XMIT_FULL 1
#define XMIT_BUSY 2

/* State bits */
#define SC_TOSS 1
#define SC_ESCAPE 2
#define SC_PREV_ERROR 4

/* Bits in rbits */
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);

/*
 * Prototypes.
 */
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                            char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
                           unsigned long arg);
static void ppp_async_process(unsigned long arg);

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound);

static struct ppp_channel_ops async_ops = {
        ppp_async_send,
        ppp_async_ioctl
};

/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using. The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true. The _close path for the ldisc is
 * now guaranteed to be sane.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct asyncppp *ap_get(struct tty_struct *tty)
{
        struct asyncppp *ap;

        read_lock(&disc_data_lock);
        ap = tty->disc_data;
        if (ap != NULL)
                atomic_inc(&ap->refcnt);
        read_unlock(&disc_data_lock);
        return ap;
}

static void ap_put(struct asyncppp *ap)
{
        if (atomic_dec_and_test(&ap->refcnt))
                up(&ap->dead_sem);
}
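
/*
 * A minimal user-space sketch (illustration only, not part of this
 * driver) of the refcount-plus-semaphore teardown pattern that
 * ap_get/ap_put implement: the owner holds the initial reference,
 * readers take extra references, and the destroyer sleeps on a
 * semaphore (initialized to 0, like init_MUTEX_LOCKED below) until
 * the last in-flight user drops out. The names obj_put/obj_destroy
 * are hypothetical.
 */
#if 0
#include <semaphore.h>
#include <stdatomic.h>

struct obj {
        atomic_int refcnt;      /* starts at 1: the owner's reference */
        sem_t dead_sem;         /* sem_init(&dead_sem, 0, 0): starts "locked" */
};

static void obj_put(struct obj *o)
{
        /* analogue of atomic_dec_and_test + up(&ap->dead_sem) */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                sem_post(&o->dead_sem);         /* last user gone */
}

static void obj_destroy(struct obj *o)
{
        /* ...first unpublish o so no new references can be taken... */
        if (atomic_fetch_sub(&o->refcnt, 1) != 1)
                sem_wait(&o->dead_sem);         /* wait for in-flight users */
        /* now exclusive: safe to free o */
}
#endif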

/*
 * Called when a tty is put into PPP line discipline. Called in process
 * context.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
        struct asyncppp *ap;
        int err;

        err = -ENOMEM;
        ap = kmalloc(sizeof(*ap), GFP_KERNEL);
        if (ap == 0)
                goto out;

        /* initialize the asyncppp structure */
        memset(ap, 0, sizeof(*ap));
        ap->tty = tty;
        ap->mru = PPP_MRU;
        spin_lock_init(&ap->xmit_lock);
        spin_lock_init(&ap->recv_lock);
        ap->xaccm[0] = ~0U;
        ap->xaccm[3] = 0x60000000U;
        ap->raccm = ~0U;
        ap->optr = ap->obuf;
        ap->olim = ap->obuf;
        ap->lcp_fcs = -1;

        skb_queue_head_init(&ap->rqueue);
        tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);

        atomic_set(&ap->refcnt, 1);
        init_MUTEX_LOCKED(&ap->dead_sem);

        ap->chan.private = ap;
        ap->chan.ops = &async_ops;
        ap->chan.mtu = PPP_MRU;
        err = ppp_register_channel(&ap->chan);
        if (err)
                goto out_free;

        tty->disc_data = ap;

        return 0;

 out_free:
        kfree(ap);
 out:
        return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up. We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct. This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
        struct asyncppp *ap;

        write_lock_irq(&disc_data_lock);
        ap = tty->disc_data;
        tty->disc_data = NULL;
        write_unlock_irq(&disc_data_lock);
        if (ap == 0)
                return;

        /*
         * We have now ensured that nobody can start using ap from now
         * on, but we have to wait for all existing users to finish.
         * Note that ppp_unregister_channel ensures that no calls to
         * our channel ops (i.e. ppp_async_send/ioctl) are in progress
         * by the time it returns.
         */
        if (!atomic_dec_and_test(&ap->refcnt))
                down(&ap->dead_sem);
        tasklet_kill(&ap->tsk);

        ppp_unregister_channel(&ap->chan);
        if (ap->rpkt != 0)
                kfree_skb(ap->rpkt);
        skb_queue_purge(&ap->rqueue);
        if (ap->tpkt != 0)
                kfree_skb(ap->tpkt);
        kfree(ap);
}

/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to the driver to complete and unregister the PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_asynctty_hangup(struct tty_struct *tty)
{
        ppp_asynctty_close(tty);
        return 0;
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
                  unsigned char __user *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
                   const unsigned char *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Called in process context only. May be re-entered by multiple
 * ioctl calling threads.
 */

static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = ap_get(tty);
        int err, val;
        int __user *p = (int __user *)arg;

        if (ap == 0)
                return -ENXIO;
        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGCHAN:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_channel_index(&ap->chan), p))
                        break;
                err = 0;
                break;

        case PPPIOCGUNIT:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_unit_number(&ap->chan), p))
                        break;
                err = 0;
                break;

        case TCGETS:
        case TCGETA:
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;

        case TCFLSH:
                /* flush our buffers and the serial port's buffer */
                if (arg == TCIOFLUSH || arg == TCOFLUSH)
                        ppp_async_flush_output(ap);
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;

        case FIONREAD:
                val = 0;
                if (put_user(val, p))
                        break;
                err = 0;
                break;

        default:
                err = -ENOIOCTLCMD;
        }

        ap_put(ap);
        return err;
}

/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
        return 0;
}

static int
ppp_asynctty_room(struct tty_struct *tty)
{
        return 65535;
}

/*
 * This can now be called from hard interrupt level as well
 * as soft interrupt level or mainline.
 */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
                     char *cflags, int count)
{
        struct asyncppp *ap = ap_get(tty);
        unsigned long flags;

        if (ap == 0)
                return;
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_async_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
        if (skb_queue_len(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        ap_put(ap);
        if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
            && tty->driver->unthrottle)
                tty->driver->unthrottle(tty);
}

static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
        struct asyncppp *ap = ap_get(tty);

        clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        if (ap == 0)
                return;
        set_bit(XMIT_WAKEUP, &ap->xmit_flags);
        tasklet_schedule(&ap->tsk);
        ap_put(ap);
}


static struct tty_ldisc ppp_ldisc = {
        .owner  = THIS_MODULE,
        .magic  = TTY_LDISC_MAGIC,
        .name   = "ppp",
        .open   = ppp_asynctty_open,
        .close  = ppp_asynctty_close,
        .hangup = ppp_asynctty_hangup,
        .read   = ppp_asynctty_read,
        .write  = ppp_asynctty_write,
        .ioctl  = ppp_asynctty_ioctl,
        .poll   = ppp_asynctty_poll,
        .receive_room = ppp_asynctty_room,
        .receive_buf = ppp_asynctty_receive,
        .write_wakeup = ppp_asynctty_wakeup,
};

static int __init
ppp_async_init(void)
{
        int err;

        err = tty_register_ldisc(N_PPP, &ppp_ldisc);
        if (err != 0)
                printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
                       err);
        return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = chan->private;
        void __user *argp = (void __user *)arg;
        int __user *p = argp;
        int err, val;
        u32 accm[8];

        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGFLAGS:
                val = ap->flags | ap->rbits;
                if (put_user(val, p))
                        break;
                err = 0;
                break;
        case PPPIOCSFLAGS:
                if (get_user(val, p))
                        break;
                ap->flags = val & ~SC_RCV_BITS;
                spin_lock_irq(&ap->recv_lock);
                ap->rbits = val & SC_RCV_BITS;
                spin_unlock_irq(&ap->recv_lock);
                err = 0;
                break;

        case PPPIOCGASYNCMAP:
                if (put_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSASYNCMAP:
                if (get_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGRASYNCMAP:
                if (put_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSRASYNCMAP:
                if (get_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGXASYNCMAP:
                if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
                        break;
                err = 0;
                break;
        case PPPIOCSXASYNCMAP:
                if (copy_from_user(accm, argp, sizeof(accm)))
                        break;
                accm[2] &= ~0x40000000U;        /* can't escape 0x5e */
                accm[3] |= 0x60000000U;         /* must escape 0x7d, 0x7e */
                memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
                err = 0;
                break;

        case PPPIOCGMRU:
                if (put_user(ap->mru, p))
                        break;
                err = 0;
                break;
        case PPPIOCSMRU:
                if (get_user(val, p))
                        break;
                if (val < PPP_MRU)
                        val = PPP_MRU;
                ap->mru = val;
                err = 0;
                break;

        default:
                err = -ENOTTY;
        }

        return err;
}

/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_async_process(unsigned long arg)
{
        struct asyncppp *ap = (struct asyncppp *) arg;
        struct sk_buff *skb;

        /* process received packets */
        while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
                if (skb->cb[0])
                        ppp_input_error(&ap->chan, 0);
                ppp_input(&ap->chan, skb);
        }

        /* try to push more stuff out */
        if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
                ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */

/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp)     do {            \
        if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) { \
                *buf++ = PPP_ESCAPE;                    \
                *buf++ = c ^ 0x20;                      \
        } else                                          \
                *buf++ = c;                             \
} while (0)
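
/*
 * Worked example (illustration only, not driver code): with the default
 * transmit ACCM set up in ppp_asynctty_open (xaccm[0] = ~0, so all of
 * 0x00-0x1f are escaped, and xaccm[3] = 0x60000000, covering 0x7d and
 * 0x7e), the bytes 0x7e 0x41 0x11 go out as 0x7d 0x5e 0x41 0x7d 0x31.
 * A standalone sketch of the same test PUT_BYTE performs:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* bit (c & 0x1f) of 32-bit word (c >> 5), exactly as in PUT_BYTE */
static int must_escape(const uint32_t accm[8], unsigned char c, int islcp)
{
        return (islcp && c < 0x20) || (accm[c >> 5] & (1u << (c & 0x1f)));
}

int main(void)
{
        uint32_t accm[8] = { ~0u, 0, 0, 0x60000000u };  /* default xaccm */
        unsigned char in[] = { 0x7e, 0x41, 0x11 };
        size_t i;

        for (i = 0; i < sizeof(in); i++) {
                if (must_escape(accm, in[i], 0))
                        printf("0x7d 0x%02x ", in[i] ^ 0x20);
                else
                        printf("0x%02x ", in[i]);
        }
        printf("\n");   /* prints: 0x7d 0x5e 0x41 0x7d 0x31 */
        return 0;
}
#endif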

static int
ppp_async_encode(struct asyncppp *ap)
{
        int fcs, i, count, c, proto;
        unsigned char *buf, *buflim;
        unsigned char *data;
        int islcp;

        buf = ap->obuf;
        ap->olim = buf;
        ap->optr = buf;
        i = ap->tpkt_pos;
        data = ap->tpkt->data;
        count = ap->tpkt->len;
        fcs = ap->tfcs;
        proto = (data[0] << 8) + data[1];

        /*
         * LCP packets with code values between 1 (configure-request)
         * and 7 (code-reject) must be sent as though no options
         * had been negotiated.
         */
        islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

        if (i == 0) {
                if (islcp)
                        async_lcp_peek(ap, data, count, 0);

                /*
                 * Start of a new packet - insert the leading FLAG
                 * character if necessary.
                 */
                if (islcp || flag_time == 0
                    || jiffies - ap->last_xmit >= flag_time)
                        *buf++ = PPP_FLAG;
                ap->last_xmit = jiffies;
                fcs = PPP_INITFCS;

                /*
                 * Put in the address/control bytes if necessary
                 */
                if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
                        PUT_BYTE(ap, buf, 0xff, islcp);
                        fcs = PPP_FCS(fcs, 0xff);
                        PUT_BYTE(ap, buf, 0x03, islcp);
                        fcs = PPP_FCS(fcs, 0x03);
                }
        }

        /*
         * Once we put in the last byte, we need to put in the FCS
         * and closing flag, so make sure there are at least 7 bytes
         * of free space in the output buffer.
         */
        buflim = ap->obuf + OBUFSIZE - 6;
        while (i < count && buf < buflim) {
                c = data[i++];
                if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
                        continue;       /* compress protocol field */
                fcs = PPP_FCS(fcs, c);
                PUT_BYTE(ap, buf, c, islcp);
        }

        if (i < count) {
                /*
                 * Remember where we are up to in this packet.
                 */
                ap->olim = buf;
                ap->tpkt_pos = i;
                ap->tfcs = fcs;
                return 0;
        }

        /*
         * We have finished the packet. Add the FCS and flag.
         */
        fcs = ~fcs;
        c = fcs & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        c = (fcs >> 8) & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        *buf++ = PPP_FLAG;
        ap->olim = buf;

        kfree_skb(ap->tpkt);
        ap->tpkt = NULL;
        return 1;
}
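
/*
 * Illustration (not driver code) of the FCS round trip the encoder and
 * process_input_packet rely on: run the FCS over the payload starting
 * from PPP_INITFCS (0xffff), append the one's complement low byte then
 * high byte, and a second pass over payload plus trailer always lands
 * on PPP_GOODFCS (0xf0b8). This sketch uses the bitwise form of the
 * RFC 1662 FCS-16; the driver gets the same function, table-driven,
 * from <linux/crc-ccitt.h> via PPP_FCS.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t fcs16_byte(uint16_t fcs, unsigned char c)
{
        int i;

        fcs ^= c;
        for (i = 0; i < 8; i++)
                fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
        return fcs;
}

int main(void)
{
        unsigned char frame[8] = { 0xff, 0x03, 0xc0, 0x21 };    /* A/C + LCP */
        size_t i, len = 4;
        uint16_t fcs = 0xffff, check = 0xffff;          /* PPP_INITFCS */

        for (i = 0; i < len; i++)
                fcs = fcs16_byte(fcs, frame[i]);
        fcs = ~fcs;                             /* complement, as above */
        frame[len++] = fcs & 0xff;              /* low byte first */
        frame[len++] = (fcs >> 8) & 0xff;

        for (i = 0; i < len; i++)
                check = fcs16_byte(check, frame[i]);
        assert(check == 0xf0b8);                /* PPP_GOODFCS */
        return 0;
}
#endif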

/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
        struct asyncppp *ap = chan->private;

        ppp_async_push(ap);

        if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
                return 0;       /* already full */
        ap->tpkt = skb;
        ap->tpkt_pos = 0;

        ppp_async_push(ap);
        return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
        int avail, sent, done = 0;
        struct tty_struct *tty = ap->tty;
        int tty_stuffed = 0;

        /*
         * We can get called recursively here if the tty write
         * function calls our wakeup function. This can happen
         * for example on a pty with both the master and slave
         * set to PPP line discipline.
         * We use the XMIT_BUSY bit to detect this and get out,
         * leaving the XMIT_WAKEUP bit set to tell the other
         * instance that it may be able to write more now.
         */
        if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                return 0;
        spin_lock_bh(&ap->xmit_lock);
        for (;;) {
                if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
                        tty_stuffed = 0;
                if (!tty_stuffed && ap->optr < ap->olim) {
                        avail = ap->olim - ap->optr;
                        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
                        sent = tty->driver->write(tty, ap->optr, avail);
                        if (sent < 0)
                                goto flush;     /* error, e.g. loss of CD */
                        ap->optr += sent;
                        if (sent < avail)
                                tty_stuffed = 1;
                        continue;
                }
                if (ap->optr >= ap->olim && ap->tpkt != 0) {
                        if (ppp_async_encode(ap)) {
                                /* finished processing ap->tpkt */
                                clear_bit(XMIT_FULL, &ap->xmit_flags);
                                done = 1;
                        }
                        continue;
                }
                /*
                 * We haven't made any progress this time around.
                 * Clear XMIT_BUSY to let other callers in, but
                 * after doing so we have to check if anyone set
                 * XMIT_WAKEUP since we last checked it. If they
                 * did, we should try again to set XMIT_BUSY and go
                 * around again in case XMIT_BUSY was still set when
                 * the other caller tried.
                 */
                clear_bit(XMIT_BUSY, &ap->xmit_flags);
                /* any more work to do? if not, exit the loop */
                if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
                      || (!tty_stuffed && ap->tpkt != 0)))
                        break;
                /* more work to do, see if we can do it now */
                if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                        break;
        }
        spin_unlock_bh(&ap->xmit_lock);
        return done;

 flush:
        clear_bit(XMIT_BUSY, &ap->xmit_flags);
        if (ap->tpkt != 0) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        ap->optr = ap->olim;
        spin_unlock_bh(&ap->xmit_lock);
        return done;
}
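
/*
 * The exit protocol at the bottom of that loop (clear XMIT_BUSY,
 * re-test XMIT_WAKEUP, try to retake XMIT_BUSY) is what prevents a
 * lost wakeup: a wakeup that arrives just before BUSY is cleared is
 * noticed by whichever side wins the retake. A condensed user-space
 * sketch of just that hand-off (the flag bits and the worker/wakeup
 * names are hypothetical; the real work is elided):
 */
#if 0
#include <stdatomic.h>

#define F_WAKEUP (1u << 0)
#define F_BUSY   (1u << 1)

static atomic_uint xmit_flags;

static void worker(void)
{
        if (atomic_fetch_or(&xmit_flags, F_BUSY) & F_BUSY)
                return;                 /* someone else is pushing */
        for (;;) {
                atomic_fetch_and(&xmit_flags, ~F_WAKEUP);
                /* ... push pending output here ... */
                atomic_fetch_and(&xmit_flags, ~F_BUSY);
                if (!(atomic_load(&xmit_flags) & F_WAKEUP))
                        break;          /* no wakeup raced in: really done */
                if (atomic_fetch_or(&xmit_flags, F_BUSY) & F_BUSY)
                        break;          /* the racing caller took over */
        }
}

static void wakeup(void)        /* e.g. called when the tty drains */
{
        atomic_fetch_or(&xmit_flags, F_WAKEUP);
        worker();
}
#endif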

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl. Can be entered in parallel
 * but this is covered by the xmit_lock.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
        int done = 0;

        spin_lock_bh(&ap->xmit_lock);
        ap->optr = ap->olim;
        if (ap->tpkt != NULL) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        spin_unlock_bh(&ap->xmit_lock);
        if (done)
                ppp_output_wakeup(&ap->chan);
}

/*
 * Receive-side routines.
 */

/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
        int i, c;

        for (i = 0; i < count; ++i) {
                c = buf[i];
                if (c == PPP_ESCAPE || c == PPP_FLAG
                    || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
                        break;
        }
        return i;
}

/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
        struct sk_buff *skb;
        unsigned char *p;
        unsigned int len, fcs, proto;

        skb = ap->rpkt;
        if (ap->state & (SC_TOSS | SC_ESCAPE))
                goto err;

        if (skb == NULL)
                return;         /* 0-length packet */

        /* check the FCS */
        p = skb->data;
        len = skb->len;
        if (len < 3)
                goto err;       /* too short */
        fcs = PPP_INITFCS;
        for (; len > 0; --len)
                fcs = PPP_FCS(fcs, *p++);
        if (fcs != PPP_GOODFCS)
                goto err;       /* bad FCS */
        skb_trim(skb, skb->len - 2);

        /* check for address/control and protocol compression */
        p = skb->data;
        if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
                /* chop off address/control */
                if (skb->len < 3)
                        goto err;
                p = skb_pull(skb, 2);
        }
        proto = p[0];
        if (proto & 1) {
                /* protocol is compressed */
                skb_push(skb, 1)[0] = 0;
        } else {
                if (skb->len < 2)
                        goto err;
                proto = (proto << 8) + p[1];
                if (proto == PPP_LCP)
                        async_lcp_peek(ap, p, skb->len, 1);
        }

        /* queue the frame to be processed */
        skb->cb[0] = ap->state;
        skb_queue_tail(&ap->rqueue, skb);
        ap->rpkt = NULL;
        ap->state = 0;
        return;

 err:
        /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
        ap->state = SC_PREV_ERROR;
        if (skb)
                skb_trim(skb, 0);
}
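
/*
 * Worked example (illustration only) of the header handling above:
 * after FCS stripping, an LCP frame arrives as ff 03 c0 21 ... (A/C
 * present, protocol uncompressed), while a PFC'd IP packet arrives as
 * just 21 ... (odd first byte, so the 0x00 high byte of protocol
 * 0x0021 was elided and is re-inserted by the skb_push above). The
 * helper name decode_proto is hypothetical.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Returns the 2-byte protocol and sets *hdr to the header length. */
static uint16_t decode_proto(const unsigned char *p, size_t len, size_t *hdr)
{
        size_t off = 0;

        if (len >= 2 && p[0] == 0xff && p[1] == 0x03)   /* ALLSTATIONS, UI */
                off = 2;
        if (p[off] & 1) {                       /* compressed protocol */
                *hdr = off + 1;
                return p[off];                  /* high byte implicitly 0 */
        }
        *hdr = off + 2;
        return (p[off] << 8) | p[off + 1];
}

int main(void)
{
        unsigned char lcp[] = { 0xff, 0x03, 0xc0, 0x21 };
        unsigned char ip[]  = { 0x21, 0x45 };   /* PFC'd IP (0x0021) */
        size_t hdr;

        assert(decode_proto(lcp, sizeof(lcp), &hdr) == 0xc021 && hdr == 4);
        assert(decode_proto(ip, sizeof(ip), &hdr) == 0x0021 && hdr == 1);
        return 0;
}
#endif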

/* Called when the tty driver has data for us. Runs in parallel with the
   other ldisc functions but will not be re-entered. */

static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                char *flags, int count)
{
        struct sk_buff *skb;
        int c, i, j, n, s, f;
        unsigned char *sp;

        /* update bits used for 8-bit cleanness detection */
        if (~ap->rbits & SC_RCV_BITS) {
                s = 0;
                for (i = 0; i < count; ++i) {
                        c = buf[i];
                        if (flags != 0 && flags[i] != 0)
                                continue;
                        s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
                        c = ((c >> 4) ^ c) & 0xf;
                        s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
                }
                ap->rbits |= s;
        }

        while (count > 0) {
                /* scan through and see how many chars we can do in bulk */
                if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
                        n = 1;
                else
                        n = scan_ordinary(ap, buf, count);

                f = 0;
                if (flags != 0 && (ap->state & SC_TOSS) == 0) {
                        /* check the flags to see if any char had an error */
                        for (j = 0; j < n; ++j)
                                if ((f = flags[j]) != 0)
                                        break;
                }
                if (f != 0) {
                        /* start tossing */
                        ap->state |= SC_TOSS;

                } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
                        /* stuff the chars in the skb */
                        skb = ap->rpkt;
                        if (skb == 0) {
                                skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
                                if (skb == 0)
                                        goto nomem;
                                /* Try to get the payload 4-byte aligned */
                                if (buf[0] != PPP_ALLSTATIONS)
                                        skb_reserve(skb, 2 + (buf[0] & 1));
                                ap->rpkt = skb;
                        }
                        if (n > skb_tailroom(skb)) {
                                /* packet overflowed MRU */
                                ap->state |= SC_TOSS;
                        } else {
                                sp = skb_put(skb, n);
                                memcpy(sp, buf, n);
                                if (ap->state & SC_ESCAPE) {
                                        sp[0] ^= 0x20;
                                        ap->state &= ~SC_ESCAPE;
                                }
                        }
                }

                if (n >= count)
                        break;

                c = buf[n];
                if (flags != NULL && flags[n] != 0) {
                        ap->state |= SC_TOSS;
                } else if (c == PPP_FLAG) {
                        process_input_packet(ap);
                } else if (c == PPP_ESCAPE) {
                        ap->state |= SC_ESCAPE;
                } else if (I_IXON(ap->tty)) {
                        if (c == START_CHAR(ap->tty))
                                start_tty(ap->tty);
                        else if (c == STOP_CHAR(ap->tty))
                                stop_tty(ap->tty);
                }
                /* otherwise it's a char in the recv ACCM */
                ++n;

                buf += n;
                if (flags != 0)
                        flags += n;
                count -= n;
        }
        return;

 nomem:
        printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
        ap->state |= SC_TOSS;
}
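
/*
 * The 8-bit-cleanness loop above uses a classic trick: XOR-folding a
 * byte to 4 bits preserves its parity, and the constant 0x6996 is a
 * 16-entry parity table (bit n of 0x6996 is the parity of n). A
 * standalone check (illustration only):
 */
#if 0
#include <assert.h>

static int odd_parity(unsigned char c)
{
        c = ((c >> 4) ^ c) & 0xf;       /* fold to 4 bits, parity preserved */
        return (0x6996 >> c) & 1;       /* look up parity of the nibble */
}

int main(void)
{
        assert(odd_parity(0x01) == 1);  /* one bit set: odd */
        assert(odd_parity(0x03) == 0);  /* two bits set: even */
        assert(odd_parity(0x6b) == 1);  /* five bits set: odd */
        return 0;
}
#endif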

/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options. This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
#define CONFREQ         1       /* LCP code field values */
#define CONFACK         2
#define LCP_MRU         1       /* LCP option numbers */
#define LCP_ASYNCMAP    2

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound)
{
        int dlen, fcs, i, code;
        u32 val;

        data += 2;              /* skip protocol bytes */
        len -= 2;
        if (len < 4)            /* 4 = code, ID, length */
                return;
        code = data[0];
        if (code != CONFACK && code != CONFREQ)
                return;
        dlen = (data[2] << 8) + data[3];
        if (len < dlen)
                return;         /* packet got truncated or length is bogus */

        if (code == (inbound? CONFACK: CONFREQ)) {
                /*
                 * sent confreq or received confack:
                 * calculate the crc of the data from the ID field on.
                 */
                fcs = PPP_INITFCS;
                for (i = 1; i < dlen; ++i)
                        fcs = PPP_FCS(fcs, data[i]);

                if (!inbound) {
                        /* outbound confreq - remember the crc for later */
                        ap->lcp_fcs = fcs;
                        return;
                }

                /* received confack, check the crc */
                fcs ^= ap->lcp_fcs;
                ap->lcp_fcs = -1;
                if (fcs != 0)
                        return;
        } else if (inbound)
                return;         /* not interested in received confreq */

        /* process the options in the confack */
        data += 4;
        dlen -= 4;
        /* data[0] is code, data[1] is length */
        while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
                switch (data[0]) {
                case LCP_MRU:
                        val = (data[2] << 8) + data[3];
                        if (inbound)
                                ap->mru = val;
                        else
                                ap->chan.mtu = val;
                        break;
                case LCP_ASYNCMAP:
                        val = (data[2] << 24) + (data[3] << 16)
                                + (data[4] << 8) + data[5];
                        if (inbound)
                                ap->raccm = val;
                        else
                                ap->xaccm[0] = val;
                        break;
                }
                dlen -= data[1];
                data += data[1];
        }
}
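
/*
 * Worked example (illustration only) of the option walk at the end of
 * async_lcp_peek: LCP options are (type, length, value...) triples
 * where length covers the whole option. For a confack carrying MRU
 * 1500 and ASYNCMAP 0x000a0000, the same loop extracts both values:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
        unsigned char opts[] = {
                1, 4, 0x05, 0xdc,                       /* LCP_MRU = 1500 */
                2, 6, 0x00, 0x0a, 0x00, 0x00,           /* LCP_ASYNCMAP */
        };
        unsigned char *data = opts;
        int dlen = sizeof(opts);
        uint32_t mru = 0, accm = 0;

        while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
                switch (data[0]) {
                case 1:
                        mru = (data[2] << 8) + data[3];
                        break;
                case 2:
                        accm = ((uint32_t)data[2] << 24) + (data[3] << 16)
                                + (data[4] << 8) + data[5];
                        break;
                }
                dlen -= data[1];
                data += data[1];
        }
        assert(mru == 1500 && accm == 0x000a0000);
        return 0;
}
#endif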

static void __exit ppp_async_cleanup(void)
{
        if (tty_register_ldisc(N_PPP, NULL) != 0)
                printk(KERN_ERR "failed to unregister PPP line discipline\n");
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);