/*
 * Copyright (C) 2015-2016 Samsung Electronics
 *
 * Based on dummy_hcd.c, which is:
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 Alan Stern
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>

#include "vudc.h"

#define DEV_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define DEV_INREQUEST	(DEV_REQUEST | USB_DIR_IN)
#define INTF_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define INTF_INREQUEST	(INTF_REQUEST | USB_DIR_IN)
#define EP_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define EP_INREQUEST	(EP_REQUEST | USB_DIR_IN)
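/*
 * The masks above are compared directly against setup->bRequestType in
 * handle_control_request(): standard type, recipient and (for the *_IN
 * variants) direction must all match.
 */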
static int get_frame_limit(enum usb_device_speed speed)
{
	switch (speed) {
	case USB_SPEED_LOW:
		return 8 /*bytes*/ * 12 /*packets*/;
	case USB_SPEED_FULL:
		return 64 /*bytes*/ * 19 /*packets*/;
	case USB_SPEED_HIGH:
		return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
	case USB_SPEED_SUPER:
		/* Bus speed is 500000 bytes/ms, so use a little less */
		return 490000;
	default:
		return -1; /* unknown speed, or not set yet */
	}
}
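/*
 * The value returned above is the per-millisecond byte budget that
 * v_timer() spends on queued urbs in each emulated frame; a negative
 * result makes the timer go idle until the gadget speed is known.
 */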
/*
 * handle_control_request() - handles all control transfers
 * @udc: pointer to vudc
 * @urb: the urb request to handle
 * @setup: pointer to the setup data for a USB device control request
 * @status: pointer to request handling status
 *
 * Return 0 - if the request was handled
 *	  1 - if the request wasn't handled
 *	  error code on error
 *
 * Adapted from drivers/usb/gadget/udc/dummy_hcd.c
 */
static int handle_control_request(struct vudc *udc, struct urb *urb,
				  struct usb_ctrlrequest *setup,
				  int *status)
{
	struct vep	*ep2;
	int		ret_val = 1;
	unsigned int	w_index;
	unsigned int	w_value;

	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != DEV_REQUEST)
			break;
		udc->address = w_value;
		ret_val = 0;
		*status = 0;
		break;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				udc->gadget.b_hnp_enable = 1;
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				udc->gadget.a_hnp_support = 1;
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				udc->gadget.a_alt_hnp_support = 1;
				break;
			default:
				ret_val = -EOPNOTSUPP;
			}
			if (ret_val == 0) {
				udc->devstatus |= (1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			ep2->halted = 1;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				w_value = USB_DEVICE_REMOTE_WAKEUP;
				break;
			case USB_DEVICE_U1_ENABLE:
			case USB_DEVICE_U2_ENABLE:
			case USB_DEVICE_LTM_ENABLE:
				ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (ret_val == 0) {
				udc->devstatus &= ~(1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (!ep2->wedged)
				ep2->halted = 0;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == DEV_INREQUEST
		    || setup->bRequestType == INTF_INREQUEST
		    || setup->bRequestType == EP_INREQUEST) {
			char *buf;

			/*
			 * device: remote wakeup, selfpowered
			 * interface: nothing
			 * endpoint: halt
			 */
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == EP_INREQUEST) {
					ep2 = vudc_find_endpoint(udc, w_index);
					if (!ep2) {
						ret_val = -EOPNOTSUPP;
						break;
					}
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					   DEV_INREQUEST) {
					buf[0] = (u8)udc->devstatus;
				} else {
					buf[0] = 0;
				}
			}
			if (urb->transfer_buffer_length > 1)
				buf[1] = 0;
			urb->actual_length = min_t(u32, 2,
				urb->transfer_buffer_length);
			ret_val = 0;
			*status = 0;
		}
		break;
	}
	return ret_val;
}
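/*
 * A return value of 1 means the request was not handled here; v_timer()
 * then forwards the setup packet to the gadget driver's ->setup() hook.
 */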
/* Adapted from dummy_hcd.c ; caller must hold lock */
static int transfer(struct vudc *udc,
		struct urb *urb, struct vep *ep, int limit)
{
	struct vrequest	*req;
	int sent = 0;

top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->req_queue, req_entry) {
		unsigned int	host_len, dev_len, len;
		void		*ubuf_pos, *rbuf_pos;
		int		is_short, to_host;
		int		rescan = 0;

		/*
		 * 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);

		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* send multiple of maxpacket first, then remainder */
			if (len >= ep->ep.maxpacket) {
				is_short = 0;
				if (len % ep->ep.maxpacket > 0)
					rescan = 1;
				len -= len % ep->ep.maxpacket;
			} else {
				is_short = 1;
			}

			ubuf_pos = urb->transfer_buffer + urb->actual_length;
			rbuf_pos = req->req.buf + req->req.actual;

			if (urb->pipe & USB_DIR_IN)
				memcpy(ubuf_pos, rbuf_pos, len);
			else
				memcpy(rbuf_pos, ubuf_pos, len);

			urb->actual_length += len;
			req->req.actual += len;
			sent += len;
		}

		/*
		 * short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				urb->status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					urb->status = -EOVERFLOW;
				else
					urb->status = 0;
			} else {
				urb->status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}

		/* many requests terminate without a short packet */
		/* also check if we need to send zlp */
		} else {
			if (req->req.length == req->req.actual) {
				if (req->req.zero && to_host)
					rescan = 1;
				else
					req->req.status = 0;
			}
			if (urb->transfer_buffer_length == urb->actual_length) {
				if (urb->transfer_flags & URB_ZERO_PACKET &&
				    !to_host)
					rescan = 1;
				else
					urb->status = 0;
			}
		}

		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {
			list_del_init(&req->req_entry);
			spin_unlock(&udc->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&udc->lock);

			/* requests might have been unlinked... */
			rescan = 1;
		}

		/* host side completion --> terminate */
		if (urb->status != -EINPROGRESS)
			break;

		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}
	return sent;
}
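/*
 * transfer() returns the number of bytes actually copied; v_timer()
 * subtracts that from the remaining per-frame byte budget.
 */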
static void v_timer(unsigned long _vudc)
{
	struct vudc *udc = (struct vudc *) _vudc;
	struct transfer_timer *timer = &udc->tr_timer;
	struct urbp *urb_p, *tmp;
	unsigned long flags;
	struct usb_ep *_ep;
	struct vep *ep;
	int ret = 0;
	int total, limit;

	spin_lock_irqsave(&udc->lock, flags);

	total = get_frame_limit(udc->gadget.speed);
	if (total < 0) {	/* unknown speed, or not set yet */
		timer->state = VUDC_TR_IDLE;
		spin_unlock_irqrestore(&udc->lock, flags);
		return;
	}
	/* is it next frame now? */
	if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
		timer->frame_limit = total;
		/* FIXME: how to make it accurate? */
		timer->frame_start = jiffies;
	} else {
		total = timer->frame_limit;
	}

	/* We have to clear ep->already_seen to restart transfers */
	list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
		ep = to_vep(_ep);
		ep->already_seen = 0;
	}
	udc->ep[0].already_seen = 0;

	list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
		struct urb *urb = urb_p->urb;

		ep = urb_p->ep;
		if (urb->unlinked)
			goto return_urb;
		if (timer->state != VUDC_TR_RUNNING)
			continue;

		if (!ep) {
			urb->status = -EPROTO;
			goto return_urb;
		}

		/* Used up bandwidth? */
		if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
			continue;

		if (ep->already_seen)
			continue;
		ep->already_seen = 1;
		if (ep == &udc->ep[0] && urb_p->new) {
			ep->setup_stage = 1;
			urb_p->new = 0;
		}
		if (ep->halted && !ep->setup_stage) {
			urb->status = -EPIPE;
			goto return_urb;
		}

		if (ep == &udc->ep[0] && ep->setup_stage) {
			/* TODO - flush any stale requests */
			ep->setup_stage = 0;
			ep->halted = 0;

			ret = handle_control_request(udc, urb,
				(struct usb_ctrlrequest *) urb->setup_packet,
				&urb->status);
			if (ret > 0) {
				spin_unlock(&udc->lock);
				ret = udc->driver->setup(&udc->gadget,
					(struct usb_ctrlrequest *)
						urb->setup_packet);
				spin_lock(&udc->lock);
			}
			if (ret >= 0) {
				/* no delays (max 64kb data stage) */
				limit = 64 * 1024;
				goto treat_control_like_bulk;
			} else {
				urb->status = -EPIPE;
				urb->actual_length = 0;
				goto return_urb;
			}
		}

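		/*
		 * Non-control endpoints: the urb is charged against the
		 * remaining frame budget; interrupt transfers get extra
		 * headroom and isochronous transfers are not emulated.
		 */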
		limit = total;
		switch (ep->type) {
		case USB_ENDPOINT_XFER_ISOC:
			/* TODO: support isochronous endpoints */
			urb->status = -EXDEV;
			break;

		case USB_ENDPOINT_XFER_INT:
			/*
			 * TODO: figure out bandwidth guarantees
			 * for now, give unlimited bandwidth
			 */
			limit += urb->transfer_buffer_length;
			/* fall through */
		default:
treat_control_like_bulk:
			total -= transfer(udc, urb, ep, limit);
		}
		if (urb->status == -EINPROGRESS)
			continue;

return_urb:
		if (ep)
			ep->already_seen = ep->setup_stage = 0;

		spin_lock(&udc->lock_tx);
		list_del(&urb_p->urb_entry);
		if (!urb->unlinked) {
			v_enqueue_ret_submit(udc, urb_p);
		} else {
			v_enqueue_ret_unlink(udc, urb_p->seqnum,
					     urb->unlinked);
			free_urbp_and_urb(urb_p);
		}
		wake_up(&udc->tx_waitq);
		spin_unlock(&udc->lock_tx);
	}

	/* TODO - also wait on empty usb_request queues? */
	if (list_empty(&udc->urb_queue))
		timer->state = VUDC_TR_IDLE;
	else
		mod_timer(&timer->timer,
			  timer->frame_start + msecs_to_jiffies(1));

	spin_unlock_irqrestore(&udc->lock, flags);
}
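/*
 * Each timer run emulates roughly one 1 ms USB frame; the timer rearms
 * itself only while urbs remain queued, and v_kick_timer() below lets
 * callers restart it when new work arrives.
 */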
/* All timer functions are run with udc->lock held */

void v_init_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	setup_timer(&t->timer, v_timer, (unsigned long) udc);
	t->state = VUDC_TR_STOPPED;
}
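/*
 * Note: this predates the timer_setup() API, so the vudc pointer travels
 * to v_timer() through the timer's unsigned long 'data' argument.
 */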
void v_start_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer start");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		return v_kick_timer(udc, jiffies);
	case VUDC_TR_STOPPED:
		t->state = VUDC_TR_IDLE;
		t->frame_start = jiffies;
		t->frame_limit = get_frame_limit(udc->gadget.speed);
		return v_kick_timer(udc, jiffies);
	}
}
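/*
 * Timer state machine: STOPPED -> IDLE on start, IDLE -> RUNNING on kick,
 * and back to IDLE from v_timer() once the urb queue drains (or while the
 * gadget speed is still unknown).
 */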
void v_kick_timer(struct vudc *udc, unsigned long time)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer kick");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		t->state = VUDC_TR_RUNNING;
		/* fall through */
	case VUDC_TR_STOPPED:
		/* we may want to kick timer to unqueue urbs */
		mod_timer(&t->timer, time);
	}
}
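/*
 * Kicking a stopped timer still schedules one v_timer() run, so urbs that
 * were unlinked while the timer was stopped can be dequeued and returned.
 */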
void v_stop_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	/* timer itself will take care of stopping */
	dev_dbg(&udc->pdev->dev, "timer stop");
	t->state = VUDC_TR_STOPPED;
}
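/*
 * No del_timer() is needed in v_stop_timer(): v_timer() checks
 * timer->state before doing per-frame work, so flipping the state to
 * VUDC_TR_STOPPED is enough.
 */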