// SPDX-License-Identifier: GPL-2.0
/*
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "debug.h"
#include "gadget.h"
#include "io.h"

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req);

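/*
 * dwc3_ep0_prepare_one_trb - fill one TRB in the shared ep0 TRB pool
 * @dep: physical endpoint 0 or 1
 * @buf_dma: DMA address of the buffer to transfer
 * @len: transfer length in bytes
 * @type: DWC3_TRBCTL_CONTROL_* TRB control type
 * @chain: true when another TRB follows for the same transfer
 *
 * Chained TRBs get the CHN bit; the last TRB of a transfer gets IOC/LST so
 * the controller raises an interrupt when it completes.
 */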
static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
		dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
	struct dwc3_trb		*trb;
	struct dwc3		*dwc = dep->dwc;

	trb = &dwc->ep0_trb[dep->trb_enqueue];

	if (chain)
		dep->trb_enqueue++;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_ISP_IMI);

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else
		trb->ctrl |= (DWC3_TRB_CTRL_IOC
				| DWC3_TRB_CTRL_LST);

	trace_dwc3_prepare_trb(dep, trb);
}

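/*
 * dwc3_ep0_start_trans - issue Start Transfer for the prepared ep0 TRB(s)
 *
 * Returns 0 if a transfer is already in flight or on success; on success
 * the next expected event is recorded as DWC3_EP0_COMPLETE.
 */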
static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3		*dwc = dep->dwc;
	int			ret;

	if (dep->flags & DWC3_EP_TRANSFER_STARTED)
		return 0;

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0)
		return ret;

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}

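/*
 * __dwc3_gadget_ep0_queue - internal ep0 queue implementation
 *
 * Called with dwc->lock held. Depending on the current control transfer
 * state this either kicks an already-pending transfer, completes a delayed
 * STATUS phase, or starts the DATA phase right away (see the comments
 * below).
 */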
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->epnum		= dep->number;

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned int	direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned int	direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issuing Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase. Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, core will issue
	 * XferComplete for the DATA phase, before actually initiating it in
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything and,
	 * only then, we can go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned int	direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}

int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request	*req = to_dwc3_request(request);
	struct dwc3_ep		*dep = to_dwc3_ep(ep);
	struct dwc3		*dwc = dep->dwc;
	unsigned long		flags;
	int			ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->pending_list)) {
		ret = -EBUSY;
		goto out;
	}

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

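/*
 * dwc3_ep0_stall_and_restart - stall ep0 and go back to the SETUP phase
 *
 * Used whenever a control request can't be handled: protocol-stall the
 * control endpoint, give back any pending request with -ECONNRESET and
 * re-arm endpoint 0 for the next SETUP packet.
 */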
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;

	/* reinitialize physical ep1 */
	dep = dwc->eps[1];
	dep->flags = DWC3_EP_ENABLED;

	/* stall is always issued on EP0 */
	dep = dwc->eps[0];
	__dwc3_gadget_ep_set_halt(dep, 1, false);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->pending_list)) {
		struct dwc3_request	*req;

		req = next_request(&dep->pending_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep		*dep = to_dwc3_ep(ep);
	struct dwc3		*dwc = dep->dwc;

	dwc3_ep0_stall_and_restart(dwc);

	return 0;
}

int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep		*dep = to_dwc3_ep(ep);
	struct dwc3		*dwc = dep->dwc;
	unsigned long		flags;
	int			ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep0_set_halt(ep, value);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

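/*
 * dwc3_ep0_out_start - arm physical endpoint 0 to receive a SETUP packet
 *
 * Queues an 8-byte CONTROL_SETUP TRB pointing at the per-device ep0 TRB
 * buffer and starts the transfer.
 */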
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret;

	complete(&dwc->ep0_in_setup);

	dep = dwc->eps[0];
	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP, false);
	ret = dwc3_ep0_start_trans(dep);
	WARN_ON(ret < 0);
}

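/*
 * dwc3_wIndex_to_dep - map a wIndex endpoint selector to a dwc3 endpoint
 *
 * Translates the endpoint number/direction encoded in wIndex into the
 * driver's physical endpoint numbering (even = OUT, odd = IN) and returns
 * the endpoint only if it is enabled.
 */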
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep		*dep;
	u32			windex = le16_to_cpu(wIndex_le);
	u32			epnum;

	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}

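/*
 * dwc3_ep0_handle_status - handle a standard GET_STATUS request (ch 9.4.5)
 *
 * Builds a two-byte status word in dwc->setup_buf (self-powered and U1/U2
 * state for the device recipient, HALT for an endpoint recipient) and
 * queues it on ep0 as the DATA phase response.
 */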
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep		*dep;
	u32			recip;
	u32			value;
	u32			reg;
	u16			usb_status = 0;
	__le16			*response_pkt;

	/* We don't support PTM_STATUS */
	value = le16_to_cpu(ctrl->wValue);
	if (value)
		return -EINVAL;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->gadget.is_selfpowered;

		if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
		    (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		}
		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
	    (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU1ENA;
	else
		reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
	    (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU2ENA;
	else
		reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
		u32 wIndex, int set)
{
	/* the lower byte of wIndex must be zero, the selector is in the
	 * upper byte */
	if ((wIndex & 0xff) != 0)
		return -EINVAL;

	if (!set)
		return -EINVAL;

	switch (wIndex >> 8) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		dwc->test_mode_nr = wIndex >> 8;
		dwc->test_mode = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dwc3_ep0_handle_device(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	enum usb_device_state state;
	u32			wValue;
	u32			wIndex;
	int			ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	state = dwc->gadget.state;

	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		break;
	/*
	 * 9.4.1 says only for SS, in AddressState only for
	 * default control pipe
	 */
	case USB_DEVICE_U1_ENABLE:
		ret = dwc3_ep0_handle_u1(dwc, state, set);
		break;
	case USB_DEVICE_U2_ENABLE:
		ret = dwc3_ep0_handle_u2(dwc, state, set);
		break;
	case USB_DEVICE_LTM_ENABLE:
		ret = -EINVAL;
		break;
	case USB_DEVICE_TEST_MODE:
		ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32			wValue;
	int			ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_INTRF_FUNC_SUSPEND:
		/*
		 * REVISIT: Ideally we would enable some low power mode here,
		 * however it's unclear what we should be doing here.
		 *
		 * For now, we're not doing anything, just making sure we return
		 * 0 so USB Command Verifier tests pass without any errors.
		 */
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep		*dep;
	u32			wValue;
	int			ret;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_ENDPOINT_HALT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
			break;

		ret = __dwc3_gadget_ep_set_halt(dep, set, true);
		if (ret)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32			recip;
	int			ret;

	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		ret = dwc3_ep0_handle_device(dwc, ctrl, set);
		break;
	case USB_RECIP_INTERFACE:
		ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
		break;
	case USB_RECIP_ENDPOINT:
		ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget.state;
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_err(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (state == USB_STATE_CONFIGURED) {
		dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	if (addr)
		usb_gadget_set_state(&dwc->gadget, USB_STATE_ADDRESS);
	else
		usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);

	return 0;
}

static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&dwc->lock);
	ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);

	return ret;
}

static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget.state;
	u32 cfg;
	int ret;
	u32 reg;

	cfg = le16_to_cpu(ctrl->wValue);

	switch (state) {
	case USB_STATE_DEFAULT:
		return -EINVAL;

	case USB_STATE_ADDRESS:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {

			/*
			 * only change state if set_config has already
			 * been processed. If gadget driver returns
			 * USB_GADGET_DELAYED_STATUS, we will wait
			 * to change the state on the next usb_ep_queue()
			 */
			if (ret == 0)
				usb_gadget_set_state(&dwc->gadget,
						USB_STATE_CONFIGURED);

			/*
			 * Enable transition to U1/U2 state when
			 * nothing is pending from application.
			 */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
		}
		break;

	case USB_STATE_CONFIGURED:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg && !ret)
			usb_gadget_set_state(&dwc->gadget,
					USB_STATE_ADDRESS);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep	*dep = to_dwc3_ep(ep);
	struct dwc3	*dwc = dep->dwc;
	u32		param = 0;
	u32		reg;
	int		ret;

	struct timing {
		u8	u1sel;
		u8	u1pel;
		__le16	u2sel;
		__le16	u2pel;
	} __packed timing;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to Synopsys Databook, if parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}

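/*
 * dwc3_ep0_set_sel - handle the SET_SEL request
 *
 * The host sends 6 bytes of U1/U2 exit latency data. The values are
 * consumed in dwc3_ep0_set_sel_cmpl() once the DATA phase completes; see
 * the comment below on why a full wMaxPacketSize request is queued.
 */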
static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep	*dep;
	enum usb_device_state state = dwc->gadget.state;
	u16		wLength;

	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wLength = le16_to_cpu(ctrl->wLength);

	if (wLength != 6) {
		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
				wLength);
		return -EINVAL;
	}

	/*
	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
	 * queue a usb_request for 6 bytes.
	 *
	 * Remember, though, this controller can't handle non-wMaxPacketSize
	 * aligned transfers on the OUT direction, so we queue a request for
	 * wMaxPacketSize instead.
	 */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u16 wLength;
	u16 wValue;
	u16 wIndex;

	wValue = le16_to_cpu(ctrl->wValue);
	wLength = le16_to_cpu(ctrl->wLength);
	wIndex = le16_to_cpu(ctrl->wIndex);

	if (wIndex || wLength)
		return -EINVAL;

	dwc->gadget.isoch_delay = wValue;

	return 0;
}

static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		ret = dwc3_ep0_set_sel(dwc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
		break;
	default:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}

static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
	int ret = -EINVAL;
	u32 len;

	if (!dwc->gadget_driver)
		goto out;

	trace_dwc3_ctrl_req(ctrl);

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

out:
	if (ret < 0)
		dwc3_ep0_stall_and_restart(dwc);
}

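/*
 * dwc3_ep0_complete_data - handle XferComplete for the DATA phase
 *
 * Reads back the TRB status; a SETUP_PENDING status means the host has
 * already moved on to a new SETUP, so the request is given back with
 * -ECONNRESET. Otherwise the transferred length is accounted and the
 * request is completed, stalling ep0 if an IN transfer ended short.
 */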
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r;
	struct usb_request	*ur;
	struct dwc3_trb		*trb;
	struct dwc3_ep		*ep0;
	u32			transferred;
	u32			status;
	u32			length;
	u8			epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	trb = dwc->ep0_trb;
	trace_dwc3_complete_trb(ep0, trb);

	r = next_request(&ep0->pending_list);
	if (!r)
		return;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		dwc->setup_packet_pending = true;
		dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	ur = &r->request;

	length = trb->size & DWC3_TRB_SIZE_MASK;
	transferred = ur->length - length;
	ur->actual += transferred;

	if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
	     ur->length && ur->zero) || dwc->ep0_bounced) {
		trb++;
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		trace_dwc3_complete_trb(ep0, trb);

		if (r->direction)
			dwc->eps[1]->trb_enqueue = 0;
		else
			dwc->eps[0]->trb_enqueue = 0;

		dwc->ep0_bounced = false;
	}

	if ((epnum & 1) && ur->actual < ur->length)
		dwc3_ep0_stall_and_restart(dwc);
	else
		dwc3_gadget_giveback(ep0, r, 0);
}

static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r;
	struct dwc3_ep		*dep;
	struct dwc3_trb		*trb;
	u32			status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(dep, trb);

	if (!list_empty(&dep->pending_list)) {
		r = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_err(dwc->dev, "invalid test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dwc->setup_packet_pending = true;

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dep->resource_index = 0;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dwc3_ep0_complete_status(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}

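/*
 * __dwc3_ep0_do_control_data - program TRB(s) for the DATA phase
 *
 * Besides the simple aligned case, two special cases need a chained TRB
 * pointing at the bounce buffer: OUT transfers whose length is not a
 * multiple of wMaxPacketSize, and aligned transfers that requested a
 * trailing zero-length packet.
 */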
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	int			ret;

	req->direction = !!dep->number;

	if (req->request.length == 0) {
		dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA, false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
			&& (dep->number == 0)) {
		u32	maxpacket;
		u32	rem;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		maxpacket = dep->endpoint.maxpacket;
		rem = req->request.length % maxpacket;
		dwc->ep0_bounced = true;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 maxpacket - rem,
					 DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
		   req->request.length && req->request.zero) {

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 0, DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else {
		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
				false);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue];

		ret = dwc3_ep0_start_trans(dep);
	}

	WARN_ON(ret < 0);
}

static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
		: DWC3_TRBCTL_CONTROL_STATUS2;

	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, type, false);
	return dwc3_ep0_start_trans(dep);
}

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	WARN_ON(dwc3_ep0_start_control_status(dep));
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	__dwc3_ep0_do_control_status(dwc, dep);
}

static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;
	int			ret;

	if (!dep->resource_index)
		return;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
}

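/*
 * dwc3_ep0_xfernotready - handle XferNotReady on endpoint 0
 *
 * DATA-phase events are only acted upon when they arrive for the wrong
 * direction (the transfer itself is started from ep0_queue, see the note
 * in __dwc3_gadget_ep0_queue); STATUS-phase events start the STATUS TRB,
 * honouring a gadget-requested delayed status.
 */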
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		/*
		 * We already have a DATA transfer in the controller's cache,
		 * if we receive a XferNotReady(DATA) we will ignore it, unless
		 * it's for the wrong direction.
		 *
		 * In that case, we must issue END_TRANSFER command to the Data
		 * Phase we already have started and issue SetStall on the
		 * control endpoint.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep	*dep = dwc->eps[dwc->ep0_expect_in];

			dev_err(dwc->dev, "unexpected direction for Data Phase\n");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->delayed_status) {
			struct dwc3_ep *dep = dwc->eps[0];

			WARN_ON_ONCE(event->endpoint_number != 1);
			/*
			 * We should handle the delay STATUS phase here if the
			 * request for handling delay STATUS has been queued
			 * into the list.
			 */
			if (!list_empty(&dep->pending_list)) {
				dwc->delayed_status = false;
				usb_gadget_set_state(&dwc->gadget,
						     USB_STATE_CONFIGURED);
				dwc3_ep0_do_control_status(dwc, event);
			}

			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}

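/*
 * dwc3_ep0_interrupt - endpoint-0 event dispatcher
 *
 * Called from the gadget interrupt handling for events on physical
 * endpoints 0 and 1; only XferComplete and XferNotReady are of interest
 * for control transfers.
 */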
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_EPCMDCMPLT:
		break;
	}
}