/*
 * u_serial.c - utilities for USB gadget "serial port"/TTY support
 *
 * Copyright (C) 2008 David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 *
 * This code also borrows from usbserial.c.
 *
 * This software is distributed under the terms of the GNU General
 * Public License ("GPL") as published by the Free Software Foundation,
 * either version 2 of that License or (at your option) any later version.
 */
/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/module.h>

#include "u_serial.h"
/*
 * This component encapsulates the TTY layer glue needed to provide basic
 * "serial port" functionality through the USB gadget stack.  Each such
 * port is exposed through a /dev/ttyGS* node.
 *
 * After this module has been loaded, individual TTY ports can be requested
 * (gserial_alloc_line()) and they stay available until they are removed
 * (gserial_free_line()).  Each one may be connected to a USB function
 * (gserial_connect), or disconnected (with gserial_disconnect) when the USB
 * host issues a config change event.  Data can only flow when the port is
 * connected to the host.
 *
 * A given TTY port can be made available in multiple configurations.
 * For example, each one might expose a ttyGS0 node which provides a
 * login application.  In one case that might use CDC ACM interface 0,
 * while another configuration might use interface 3 for that.  The
 * work to handle that (including descriptor management) is not part
 * of this component.
 *
 * Configurations may expose more than one TTY port.  For example, if
 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
 * for a telephone or fax link.  And ttyGS2 might be something that just
 * needs a simple byte stream interface for some messaging protocol that
 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
 */

#define PREFIX	"ttyGS"
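
/*
 * Illustrative sketch (not built, not part of this driver): roughly how a
 * USB function driver is expected to use this API.  The my_func_* names and
 * per-instance variables below are made up for the example; only
 * gserial_alloc_line(), gserial_connect(), gserial_disconnect() and
 * gserial_free_line() are real entry points provided by this file.
 */
#if 0
static u8 my_port_num;			/* hypothetical per-instance state */
static struct gserial my_gserial;	/* caller fills ->in/->out endpoints */

static int my_func_bind(void)
{
	/* claim a ttyGS* line; udev/mdev will create /dev/ttyGS<my_port_num> */
	return gserial_alloc_line(&my_port_num);
}

static int my_func_set_alt(void)
{
	/* host selected our interface: endpoints are ready, start the port */
	return gserial_connect(&my_gserial, my_port_num);
}

static void my_func_disable(void)
{
	/* host reconfigured or was unplugged: stop I/O on the port */
	gserial_disconnect(&my_gserial);
}

static void my_func_unbind(void)
{
	/* give the ttyGS* line back once nothing references it */
	gserial_free_line(my_port_num);
}
#endif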
/*
 * gserial is the lifecycle interface, used by USB functions
 * gs_port is the I/O nexus, used by the tty driver
 * tty_struct links to the tty/filesystem framework
 *
 * gserial <---> gs_port ... links will be null when the USB link is
 * inactive; managed by gserial_{connect,disconnect}().  Each gserial
 * instance can wrap its own USB control protocol.
 *	gserial->ioport == usb_ep->driver_data ... gs_port
 *	gs_port->port_usb ... gserial
 *
 * gs_port <---> tty_struct ... links will be null when the TTY file
 * isn't opened; managed by gs_open()/gs_close()
 *	gs_port->port.tty ... tty_struct
 *	tty_struct->driver_data ... gs_port
 */
/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
 * next layer of buffering.  For TX that's a circular buffer; for RX
 * consider it a NOP.  A third layer is provided by the TTY code.
 */
#define QUEUE_SIZE		16
#define WRITE_BUF_SIZE		8192		/* TX only */
/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	struct tty_port		port;
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;

	bool			openclose;	/* open/close in progress */
	u8			port_num;

	struct list_head	read_pool;
	int			read_started;
	int			read_allocated;
	struct list_head	read_queue;
	unsigned		n_read;
	struct tasklet_struct	push;

	struct list_head	write_pool;
	int			write_started;
	int			write_allocated;
	struct gs_buf		port_write_buf;
	wait_queue_head_t	drain_wait;	/* wait while writes drain */

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
};
static struct portmaster {
	struct mutex	lock;			/* protect open/close */
	struct gs_port	*port;
} ports[MAX_U_SERIAL_PORTS];

#define GS_CLOSE_TIMEOUT	15		/* seconds */


#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	pr_debug(fmt, ##arg)
#endif /* pr_vdebug */
#else
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	({ if (0) pr_debug(fmt, ##arg); })
#endif /* pr_vdebug */
#endif
/*-------------------------------------------------------------------------*/

/* Circular Buffer */

/*
 * Allocate a circular buffer and all associated memory.
 */
static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
{
	gb->buf_buf = kmalloc(size, GFP_KERNEL);
	if (gb->buf_buf == NULL)
		return -ENOMEM;

	gb->buf_size = size;
	gb->buf_put = gb->buf_buf;
	gb->buf_get = gb->buf_buf;

	return 0;
}

/*
 * Free the buffer and all associated memory.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	kfree(gb->buf_buf);
	gb->buf_buf = NULL;
}

/*
 * Clear out all data in the circular buffer.
 */
static void gs_buf_clear(struct gs_buf *gb)
{
	gb->buf_get = gb->buf_put;
	/* equivalent to a get of all data available */
}

/*
 * Return the number of bytes of data written into the circular buffer.
 */
static unsigned gs_buf_data_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
}

/*
 * Return the number of bytes of space available in the circular buffer.
 */
static unsigned gs_buf_space_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
}
/*
 * Copy data from a user buffer and put it into the circular buffer.
 * Restrict to the amount of space available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_space_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_put;
	if (count > len) {
		memcpy(gb->buf_put, buf, len);
		memcpy(gb->buf_buf, buf+len, count - len);
		gb->buf_put = gb->buf_buf + count - len;
	} else {
		memcpy(gb->buf_put, buf, count);
		if (count < len)
			gb->buf_put += count;
		else /* count == len */
			gb->buf_put = gb->buf_buf;
	}

	return count;
}
/*
 * Get data from the circular buffer and copy to the given buffer.
 * Restrict to the amount of data available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_data_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_get;
	if (count > len) {
		memcpy(buf, gb->buf_get, len);
		memcpy(buf+len, gb->buf_buf, count - len);
		gb->buf_get = gb->buf_buf + count - len;
	} else {
		memcpy(buf, gb->buf_get, count);
		if (count < len)
			gb->buf_get += count;
		else /* count == len */
			gb->buf_get = gb->buf_buf;
	}

	return count;
}
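
/*
 * Worked example (illustration only, not built): because buf_put == buf_get
 * means "empty", a buffer allocated with size N holds at most N - 1 bytes,
 * and gs_buf_data_avail() + gs_buf_space_avail() == N - 1 at all times.
 */
#if 0
static void gs_buf_example(void)
{
	struct gs_buf gb;
	char tmp[8];

	if (gs_buf_alloc(&gb, 8))		/* capacity is 8 - 1 = 7 bytes */
		return;

	gs_buf_put(&gb, "abcdefghij", 10);	/* only 7 bytes are accepted */
	/* gs_buf_data_avail(&gb) == 7, gs_buf_space_avail(&gb) == 0 */

	gs_buf_get(&gb, tmp, 3);		/* consume "abc" */
	/* gs_buf_data_avail(&gb) == 4, gs_buf_space_avail(&gb) == 3 */

	gs_buf_free(&gb);
}
#endif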
/*-------------------------------------------------------------------------*/

/* I/O glue between TTY (upper) and USB function (lower) driver layers */

/*
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			return NULL;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(gs_alloc_req);

/*
 * Free a usb_request and its buffer.
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
EXPORT_SYMBOL_GPL(gs_free_req);
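
/*
 * Illustrative sketch (not built): how a caller typically pairs
 * gs_alloc_req()/gs_free_req() around usb_ep_queue().  The endpoint pointer
 * and the my_* names stand in for the caller's own.
 */
#if 0
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* ... consume req->buf / req->status, then recycle or free ... */
	gs_free_req(ep, req);
}

static int my_queue_one(struct usb_ep *ep)
{
	struct usb_request *req;

	req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->complete = my_complete;

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif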
/*
 * If there is data to send, a packet is built in the given
 * buffer and the size is returned.  If there is no data to
 * send, 0 is returned.
 *
 * Called with port_lock held.
 */
static unsigned
gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
	unsigned len;

	len = gs_buf_data_avail(&port->port_write_buf);
	if (len < size)
		size = len;
	if (size != 0)
		size = gs_buf_get(&port->port_write_buf, packet, size);
	return size;
}
/*
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, in->maxpacket);
		if (len == 0) {
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}

		port->write_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);
	return status;
}
/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port.tty;
		if (!tty)
			break;

		if (port->read_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = out->maxpacket;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}
	return port->read_started;
}
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* leave data queued if tty was rx throttled */
		if (tty && test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
					size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is handled by a workqueue,
	 * so we won't get callbacks and can hold port_lock
	 */
	if (do_push)
		tty_flip_buffer_push(&port->port);

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock(&port->port_lock);
	list_add_tail(&req->list, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock(&port->port_lock);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;

	spin_lock(&port->port_lock);
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALLTHROUGH */
	case 0:
		/* normal completion */
		gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock(&port->port_lock);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
							 int *allocated)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
		if (allocated)
			(*allocated)--;
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		void (*fn)(struct usb_ep *, struct usb_request *),
		int *allocated)
{
	int			i;
	struct usb_request	*req;
	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < n; i++) {
		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
		if (allocated)
			(*allocated)++;
	}
	return 0;
}
/**
 * gs_start_io - start USB I/O streams
 * @port: the port whose endpoints are used
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, gs_read_complete,
		&port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			gs_write_complete, &port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port.tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}
/*-------------------------------------------------------------------------*/

/* TTY Driver */

/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->port.count) {
				status = 0;
				port->port.count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port.tty = tty;

	port->port.count = 1;
	port->openclose = false;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}
static int gs_writes_finished(struct gs_port *p)
{
	int cond;

	/* return true on disconnect or empty buffer */
	spin_lock_irq(&p->port_lock);
	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
	spin_unlock_irq(&p->port_lock);

	return cond;
}
static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port	*port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	if (port->port.count != 1) {
		if (port->port.count == 0)
			WARN_ON(1);
		else
			--port->port.count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->port.count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push tasklet fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port.tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up(&port->port.close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}
static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
			port->port_num, tty, count);

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = gs_buf_put(&port->port_write_buf, buf, count);
	/* treat count == 0 as flush_chars() */
	if (port->port_usb)
		status = gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}
static int gs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n",
		port->port_num, tty, ch, __builtin_return_address(0));

	spin_lock_irqsave(&port->port_lock, flags);
	status = gs_buf_put(&port->port_write_buf, &ch, 1);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}
static void gs_flush_chars(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}
static int gs_write_room(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		room = gs_buf_space_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
		port->port_num, tty, room);

	return room;
}
static int gs_chars_in_buffer(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		chars;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = gs_buf_data_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
		port->port_num, tty, chars);

	return chars;
}
/* undo side effects of setting TTY_THROTTLED */
static void gs_unthrottle(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb) {
		/* Kickstart read queue processing.  We don't do xon/xoff,
		 * rts/cts, or other handshaking with the host, but if the
		 * read queue backs up enough we'll be NAKing OUT packets.
		 */
		tasklet_schedule(&port->push);
		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
static int gs_break_ctl(struct tty_struct *tty, int duration)
{
	struct gs_port	*port = tty->driver_data;
	int		status = 0;
	struct gserial	*gser;

	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d)\n",
			port->port_num, duration);

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (gser && gser->send_break)
		status = gser->send_break(gser, duration);
	spin_unlock_irq(&port->port_lock);

	return status;
}
static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
};

/*-------------------------------------------------------------------------*/

static struct tty_driver *gs_tty_driver;
static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
	struct gs_port	*port;
	int		ret = 0;

	mutex_lock(&ports[port_num].lock);
	if (ports[port_num].port) {
		ret = -EBUSY;
		goto out;
	}

	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
	if (port == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	init_waitqueue_head(&port->drain_wait);

	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);

	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port_num = port_num;
	port->port_line_coding = *coding;

	ports[port_num].port = port;
out:
	mutex_unlock(&ports[port_num].lock);
	return ret;
}
static int gs_closed(struct gs_port *port)
{
	int cond;

	spin_lock_irq(&port->port_lock);
	cond = (port->port.count == 0) && !port->openclose;
	spin_unlock_irq(&port->port_lock);
	return cond;
}

static void gserial_free_port(struct gs_port *port)
{
	tasklet_kill(&port->push);
	/* wait for old opens to finish */
	wait_event(port->port.close_wait, gs_closed(port));
	WARN_ON(port->port_usb != NULL);
	tty_port_destroy(&port->port);
	kfree(port);
}
void gserial_free_line(unsigned char port_num)
{
	struct gs_port	*port;

	mutex_lock(&ports[port_num].lock);
	if (WARN_ON(!ports[port_num].port)) {
		mutex_unlock(&ports[port_num].lock);
		return;
	}
	port = ports[port_num].port;
	ports[port_num].port = NULL;
	mutex_unlock(&ports[port_num].lock);

	gserial_free_port(port);
	tty_unregister_device(gs_tty_driver, port_num);
}
EXPORT_SYMBOL_GPL(gserial_free_line);
int gserial_alloc_line(unsigned char *line_num)
{
	struct usb_cdc_line_coding	coding;
	struct device			*tty_dev;
	int				ret;
	int				port_num;

	/* default to 9600-8-N-1, matching init_termios below */
	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = USB_CDC_1_STOP_BITS;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = 8;

	for (port_num = 0; port_num < MAX_U_SERIAL_PORTS; port_num++) {
		ret = gs_port_alloc(port_num, &coding);
		if (ret == -EBUSY)
			continue;
		if (ret)
			return ret;
		break;
	}
	if (ret)
		return ret;

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */

	tty_dev = tty_port_register_device(&ports[port_num].port->port,
			gs_tty_driver, port_num, NULL);
	if (IS_ERR(tty_dev)) {
		struct gs_port *port;

		pr_err("%s: failed to register tty for port %d, err %ld\n",
				__func__, port_num, PTR_ERR(tty_dev));

		ret = PTR_ERR(tty_dev);
		port = ports[port_num].port;
		ports[port_num].port = NULL;
		gserial_free_port(port);
		goto err;
	}
	*line_num = port_num;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(gserial_alloc_line);
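
/*
 * Illustrative sketch (not built): allocating two lines for a two-port
 * configuration and unwinding on failure.  The my_lines array and function
 * name are made up for the example; only gserial_alloc_line() and
 * gserial_free_line() are real entry points.
 */
#if 0
static u8 my_lines[2];

static int my_alloc_two_lines(void)
{
	int ret;

	ret = gserial_alloc_line(&my_lines[0]);	/* e.g. becomes ttyGS0 */
	if (ret)
		return ret;

	ret = gserial_alloc_line(&my_lines[1]);	/* e.g. becomes ttyGS1 */
	if (ret)
		gserial_free_line(my_lines[0]);	/* undo the first one */

	return ret;
}
#endif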
/**
 * gserial_connect - notify TTY I/O glue that USB link is active
 * @gser: the function, set up with endpoints and descriptors
 * @port_num: which port is active
 * Context: any (usually from irq)
 *
 * This is called to activate endpoints and let the TTY layer know that
 * the connection is active ... not unlike "carrier detect".  It won't
 * necessarily start I/O queues; unless the TTY is held open by some
 * task, there would be no point.  However, the endpoints will be
 * activated so the USB host can perform I/O, subject to basic USB
 * hardware flow control.
 *
 * Caller needs to have set up the endpoints and USB function in @gser
 * before calling this, as well as the appropriate (speed-specific)
 * endpoint descriptors, and also have allocated @port_num by calling
 * gserial_alloc_line().
 *
 * Returns negative errno or zero.
 * On success, ep->driver_data will be overwritten.
 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port	*port;
	unsigned long	flags;
	int		status;

	if (port_num >= MAX_U_SERIAL_PORTS)
		return -ENXIO;

	port = ports[port_num].port;
	if (!port) {
		pr_err("serial line %d not allocated.\n", port_num);
		return -EINVAL;
	}
	if (port->port_usb) {
		pr_err("serial line %d is in use.\n", port_num);
		return -EBUSY;
	}

	/* activate the endpoints */
	status = usb_ep_enable(gser->in);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->port.count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;
	return status;
}
EXPORT_SYMBOL_GPL(gserial_connect);
/**
 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
 * @gser: the function, on which gserial_connect() was called
 * Context: any (usually from irq)
 *
 * This is called to deactivate endpoints and let the TTY layer know
 * that the connection went inactive ... not unlike "hangup".
 *
 * On return, the state is as if gserial_connect() had never been called;
 * there is no active USB I/O on these endpoints.
 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->port.count > 0 || port->openclose) {
		wake_up_interruptible(&port->drain_wait);
		if (port->port.tty)
			tty_hangup(port->port.tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;

	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port.count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
static int userial_init(void)
{
	unsigned	i;
	int		status;

	gs_tty_driver = alloc_tty_driver(MAX_U_SERIAL_PORTS);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);
	for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
		mutex_init(&ports[i].lock);

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n",
				__func__, status);
		goto fail;
	}

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			MAX_U_SERIAL_PORTS,
			(MAX_U_SERIAL_PORTS == 1) ? "" : "s");

	return status;
fail:
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
module_init(userial_init);
static void userial_cleanup(void)
{
	tty_unregister_driver(gs_tty_driver);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
}
module_exit(userial_cleanup);

MODULE_LICENSE("GPL");