// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PPP synchronous tty channel driver for Linux.
 *
 * This is a ppp channel driver that can be used with tty device drivers
 * that are frame oriented, such as synchronous HDLC devices.
 *
 * Complete PPP frames without encoding/decoding are exchanged between
 * the channel driver and the device driver.
 *
 * The async map IOCTL codes are implemented to keep the user mode
 * applications happy if they call them.  Synchronous PPP does not use
 * the async maps.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over sync serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20040616==
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>

#define PPP_VERSION	"2.4.2"
/* Structure for storing local state. */
struct syncppp {
	struct tty_struct *tty;
	unsigned int	flags;
	unsigned int	rbits;
	int		mru;
	spinlock_t	xmit_lock;
	spinlock_t	recv_lock;
	unsigned long	xmit_flags;
	u32		xaccm[8];
	u32		raccm;
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;
	struct sk_buff	*tpkt;
	unsigned long	last_xmit;
	struct sk_buff_head rqueue;
	struct tasklet_struct tsk;
	refcount_t	refcnt;
	struct completion dead_cmp;
	struct ppp_channel chan;	/* interface to generic ppp layer */
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

#define PPPSYNC_MAX_RQLEN	32	/* arbitrary */
static struct sk_buff *ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
			  unsigned long arg);
static void ppp_sync_process(struct tasklet_struct *t);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags,
			   int count);

static const struct ppp_channel_ops sync_ops = {
	.start_xmit = ppp_sync_send,
	.ioctl      = ppp_sync_ioctl,
};
/* Utility procedure to print a buffer in hex/ascii */
static void
ppp_print_buffer(const char *name, const __u8 *buf, int count)
{
	if (name != NULL)
		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);

	print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
}
/*
 * Routines implementing the synchronous PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_synctty_receive while another
 * calls ppp_synctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_synctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: Fixed in tty_io nowadays.
 */
static DEFINE_RWLOCK(disc_data_lock);
static struct syncppp *sp_get(struct tty_struct *tty)
{
	struct syncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		refcount_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}

static void sp_put(struct syncppp *ap)
{
	if (refcount_dec_and_test(&ap->refcnt))
		complete(&ap->dead_cmp);
}
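
/*
 * Illustrative sketch only, not part of the original driver: the usage
 * pattern the locking comment above implies.  "example_cb" is a hypothetical
 * callback; the real users are ppp_synctty_ioctl(), ppp_sync_receive() and
 * ppp_sync_wakeup() below.
 *
 *	static void example_cb(struct tty_struct *tty)
 *	{
 *		struct syncppp *ap = sp_get(tty);
 *
 *		if (!ap)
 *			return;		(ldisc already being torn down)
 *		... use ap; it cannot be freed while we hold the reference ...
 *		sp_put(ap);		(dropping the last ref wakes ppp_sync_close)
 *	}
 */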
/*
 * Called when a tty is put into sync-PPP line discipline.
 */
static int
ppp_sync_open(struct tty_struct *tty)
{
	struct syncppp *ap;
	int err;
	int speed;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	err = -ENOMEM;
	if (!ap)
		goto out;

	/* initialize the syncppp structure */
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;
	ap->xaccm[3] = 0x60000000U;
	ap->raccm = ~0U;

	skb_queue_head_init(&ap->rqueue);
	tasklet_setup(&ap->tsk, ppp_sync_process);

	refcount_set(&ap->refcnt, 1);
	init_completion(&ap->dead_cmp);

	ap->chan.private = ap;
	ap->chan.ops = &sync_ops;
	ap->chan.mtu = PPP_MRU;
	ap->chan.hdrlen = 2;	/* for A/C bytes */
	speed = tty_get_baud_rate(tty);
	ap->chan.speed = speed;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	tty->disc_data = ap;
	tty->receive_room = 65536;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}
/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_synctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the syncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_sync_close(struct tty_struct *tty)
{
	struct syncppp *ap;

	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (!ap)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!refcount_dec_and_test(&ap->refcnt))
		wait_for_completion(&ap->dead_cmp);
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	skb_queue_purge(&ap->rqueue);
	kfree_skb(ap->tpkt);
	kfree(ap);
}
/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static void ppp_sync_hangup(struct tty_struct *tty)
{
	ppp_sync_close(tty);
}
/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t count,
	      void **cookie, unsigned long offset)
{
	return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file, const u8 *buf,
	       size_t count)
{
	return -EAGAIN;
}
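
/*
 * A minimal user-space sketch (not part of the driver) of the data path the
 * comments above describe.  Assumes a channel index "chan_idx" has already
 * been obtained with PPPIOCGCHAN on the tty; error handling omitted.  Frames
 * are exchanged through /dev/ppp, never through read()/write() on the tty:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ppp-ioctl.h>
 *
 *	int ppp_fd = open("/dev/ppp", O_RDWR);
 *	ioctl(ppp_fd, PPPIOCATTCHAN, &chan_idx);
 *	... read()/write() on ppp_fd now carry complete PPP frames ...
 */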
static int
ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = sp_get(tty);
	int __user *p = (int __user *)arg;
	int err, val;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_sync_flush_output(ap);
		err = n_tty_ioctl_helper(tty, cmd, arg);
		break;

	case FIONREAD:
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		err = tty_mode_ioctl(tty, cmd, arg);
		break;
	}

	sp_put(ap);
	return err;
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_sync_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags,
		 size_t count)
{
	struct syncppp *ap = sp_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_sync_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	sp_put(ap);
	tty_unthrottle(tty);
}
static void
ppp_sync_wakeup(struct tty_struct *tty)
{
	struct syncppp *ap = sp_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (!ap)
		return;
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
	sp_put(ap);
}
static struct tty_ldisc_ops ppp_sync_ldisc = {
	.owner		= THIS_MODULE,
	.num		= N_SYNC_PPP,
	.name		= "pppsync",
	.open		= ppp_sync_open,
	.close		= ppp_sync_close,
	.hangup		= ppp_sync_hangup,
	.read		= ppp_sync_read,
	.write		= ppp_sync_write,
	.ioctl		= ppp_synctty_ioctl,
	.receive_buf	= ppp_sync_receive,
	.write_wakeup	= ppp_sync_wakeup,
};
static int __init
ppp_sync_init(void)
{
	int err;

	err = tty_register_ldisc(&ppp_sync_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
		       err);
	return err;
}
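
/*
 * A hedged user-space sketch (not part of the driver) of how this line
 * discipline is typically bound to a synchronous tty before starting pppd.
 * "tty_fd" is a hypothetical descriptor for the sync serial device; error
 * handling omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/tty.h>		(N_SYNC_PPP)
 *	#include <linux/ppp-ioctl.h>	(PPPIOCGCHAN)
 *
 *	int ldisc = N_SYNC_PPP, chan_idx;
 *	ioctl(tty_fd, TIOCSETD, &ldisc);	(invokes ppp_sync_open)
 *	ioctl(tty_fd, PPPIOCGCHAN, &chan_idx);	(handled by ppp_synctty_ioctl)
 */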
/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = chan->private;
	int err, val;
	u32 accm[8];
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, (int __user *) argp))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, p))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, p))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, (int __user *) argp))
			break;
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
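
/*
 * Illustrative user-space sketch (an assumption about typical pppd usage,
 * not something this file defines): once address/control and protocol field
 * compression have been negotiated, the daemon updates the channel flags,
 * which ppp_sync_ioctl() stores in ap->flags for ppp_sync_txmunge() to
 * consult.  The async-map ioctls above are accepted only for compatibility;
 * synchronous PPP never escapes characters.
 *
 *	int flags;
 *	ioctl(ppp_fd, PPPIOCGFLAGS, &flags);
 *	flags |= SC_COMP_AC | SC_COMP_PROT;
 *	ioctl(ppp_fd, PPPIOCSFLAGS, &flags);
 */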
/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_sync_process(struct tasklet_struct *t)
{
	struct syncppp *ap = from_tasklet(ap, t, tsk);
	struct sk_buff *skb;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->len == 0) {
			/* zero length buffers indicate error */
			ppp_input_error(&ap->chan, 0);
			kfree_skb(skb);
		} else
			ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
		ppp_output_wakeup(&ap->chan);
}
/*
 * Procedures for encapsulation and framing.
 */
static struct sk_buff *
ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
{
	int proto;
	unsigned char *data;
	int islcp;

	data = skb->data;
	proto = get_unaligned_be16(data);

	/* LCP packets with codes between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * have been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field if option enabled */
	if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
		skb_pull(skb, 1);

	/* prepend address/control fields if necessary */
	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
		if (skb_headroom(skb) < 2) {
			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
			if (npkt == NULL) {
				kfree_skb(skb);
				return NULL;
			}
			skb_reserve(npkt, 2);
			skb_copy_from_linear_data(skb,
				skb_put(npkt, skb->len), skb->len);
			consume_skb(skb);
			skb = npkt;
		}
		skb_push(skb, 2);
		skb->data[0] = PPP_ALLSTATIONS;
		skb->data[1] = PPP_UI;
	}

	ap->last_xmit = jiffies;

	if (skb && ap->flags & SC_LOG_OUTPKT)
		ppp_print_buffer("send buffer", skb->data, skb->len);

	return skb;
}
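
/*
 * Worked example (illustrative): ppp_generic hands down an IP datagram that
 * starts with the 2-byte protocol field 00 21.  With neither compression
 * option negotiated, the munged frame begins
 *
 *	ff 03 00 21 <IP payload>	(address, control, protocol)
 *
 * With SC_COMP_AC the leading ff 03 is omitted, and SC_COMP_PROT drops the
 * leading zero of the protocol field as well, leaving just
 *
 *	21 <IP payload>
 *
 * LCP packets with codes 1..7 are always sent with full framing, as the
 * comment inside ppp_sync_txmunge() requires.
 */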
/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an sync tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct syncppp *ap = chan->private;

	ppp_sync_push(ap);

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	skb = ppp_sync_txmunge(ap, skb);
	if (skb != NULL)
		ap->tpkt = skb;
	else
		clear_bit(XMIT_FULL, &ap->xmit_flags);

	ppp_sync_push(ap);
	return 1;
}
/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_sync_push(struct syncppp *ap)
{
	int sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	if (!spin_trylock_bh(&ap->xmit_lock))
		return 0;
	for (;;) {
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->tpkt) {
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			if (sent < ap->tpkt->len) {
				tty_stuffed = 1;
			} else {
				consume_skb(ap->tpkt);
				ap->tpkt = NULL;
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/* haven't made any progress */
		spin_unlock_bh(&ap->xmit_lock);
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
		      (!tty_stuffed && ap->tpkt)))
			break;
		if (!spin_trylock_bh(&ap->xmit_lock))
			break;
	}
	return done;

flush:
	if (ap->tpkt) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}
/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl.
 */
static void
ppp_sync_flush_output(struct syncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	if (done)
		ppp_output_wakeup(&ap->chan);
}
/*
 * Receive-side routines.
 */

/* Called when the tty driver has data for us.
 *
 * Data is frame oriented: each call to ppp_sync_input is considered
 * a whole frame. If the 1st flag byte is non-zero then the whole
 * frame is considered to be in error and is tossed.
 */
static void
ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count)
{
	struct sk_buff *skb;
	unsigned char *p;

	if (count == 0)
		return;

	if (ap->flags & SC_LOG_INPKT)
		ppp_print_buffer("receive buffer", buf, count);

	/* stuff the chars in the skb */
	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
	if (!skb) {
		printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
		goto err;
	}
	/* Try to get the payload 4-byte aligned */
	if (buf[0] != PPP_ALLSTATIONS)
		skb_reserve(skb, 2 + (buf[0] & 1));

	if (flags && *flags) {
		/* error flag set, ignore frame */
		goto err;
	} else if (count > skb_tailroom(skb)) {
		/* packet overflowed MRU */
		goto err;
	}

	skb_put_data(skb, buf, count);

	/* strip address/control field if present */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
		/* chop off address/control */
		if (skb->len < 3)
			goto err;
		p = skb_pull(skb, 2);
	}

	/* PPP packet length should be >= 2 bytes when protocol field is not
	 * compressed.
	 */
	if (!(p[0] & 0x01) && skb->len < 2)
		goto err;

	/* queue the frame to be processed */
	skb_queue_tail(&ap->rqueue, skb);
	return;

err:
	/* queue zero length packet as error indication */
	if (skb || (skb = dev_alloc_skb(0))) {
		skb_trim(skb, 0);
		skb_queue_tail(&ap->rqueue, skb);
	}
}
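
/*
 * Worked example (illustrative): a good frame arriving as
 *
 *	ff 03 00 21 45 ...	(A/C + protocol 0x0021 + start of IP header)
 *
 * has its ff 03 stripped above and is queued as "00 21 45 ...".  A frame
 * whose first flag byte is non-zero is replaced by a zero-length skb, which
 * ppp_sync_process() turns into ppp_input_error() for the generic layer.
 */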
static void __exit
ppp_sync_cleanup(void)
{
	tty_unregister_ldisc(&ppp_sync_ldisc);
}

module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);