// SPDX-License-Identifier: GPL-2.0
/*
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "debug.h"
#include "gadget.h"
#include "io.h"
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req);
static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
		dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
	struct dwc3_trb *trb;
	struct dwc3 *dwc;

	dwc = dep->dwc;
	trb = &dwc->ep0_trb[dep->trb_enqueue];

	if (chain)
		dep->trb_enqueue++;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_ISP_IMI);

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else
		trb->ctrl |= (DWC3_TRB_CTRL_IOC
				| DWC3_TRB_CTRL_LST);

	trace_dwc3_prepare_trb(dep, trb);
}
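/*
 * dwc3_ep0_start_trans - kick a transfer on the shared ep0 TRB
 *
 * Hands the ep0 TRB address to the controller via a Start Transfer endpoint
 * command. If a transfer has already been started on this endpoint the call
 * is a no-op.
 */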
static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc;
	int ret;

	if (dep->flags & DWC3_EP_TRANSFER_STARTED)
		return 0;

	dwc = dep->dwc;

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0)
		return ret;

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}
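/*
 * __dwc3_gadget_ep0_queue - arm a control request (called with dwc->lock held)
 *
 * Adds the request to the endpoint's pending list and, depending on the
 * current control-transfer state, either starts the Data phase right away,
 * finishes a gadget-delayed Status phase, or leaves the request to be picked
 * up on the next XferNotReady event.
 */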
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->epnum = dep->number;

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned int direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned int direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issuing Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase. Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, core will issue
	 * XferComplete for the DATA phase, before actually initiating it in
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything and,
	 * only then, we can go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned int direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->pending_list)) {
		ret = -EBUSY;
		goto out;
	}

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
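/*
 * dwc3_ep0_stall_and_restart - protocol-stall EP0 and re-arm for SETUP
 *
 * Issues SetStall on physical endpoint 0, gives back any pending request with
 * -ECONNRESET and queues a fresh CONTROL_SETUP TRB so the next SETUP packet
 * from the host can be received.
 */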
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;

	/* reinitialize physical ep1 */
	dep = dwc->eps[1];
	dep->flags = DWC3_EP_ENABLED;

	/* stall is always issued on EP0 */
	dep = dwc->eps[0];
	__dwc3_gadget_ep_set_halt(dep, 1, false);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->pending_list)) {
		struct dwc3_request *req;

		req = next_request(&dep->pending_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	dwc3_ep0_stall_and_restart(dwc);

	return 0;
}

int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep0_set_halt(ep, value);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;

	complete(&dwc->ep0_in_setup);

	dep = dwc->eps[0];
	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP, false);
	ret = dwc3_ep0_start_trans(dep);
	WARN_ON(ret < 0);
}
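/*
 * dwc3_wIndex_to_dep - map a wIndex endpoint selector to a dwc3 endpoint
 *
 * Physical endpoint numbers are (logical endpoint << 1) | direction, so the
 * index is rebuilt from the little-endian wIndex field; only endpoints that
 * are currently enabled are returned.
 */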
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep *dep;
	u32 windex = le16_to_cpu(wIndex_le);
	u32 epnum;

	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
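/*
 * GET_STATUS handling, see USB 3.0 specification section 9.4.5. Builds the
 * two-byte status word in dwc->setup_buf and queues it on EP0 as the Data
 * phase of the control transfer.
 */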
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 value;
	u32 reg;
	u16 usb_status = 0;
	__le16 *response_pkt;

	/* We don't support PTM_STATUS */
	value = le16_to_cpu(ctrl->wValue);
	if (value != 0)
		return -EINVAL;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->gadget.is_selfpowered;

		if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
		    (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		}
		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
			(dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;
	if (set && dwc->dis_u1_entry_quirk)
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU1ENA;
	else
		reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
			(dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;
	if (set && dwc->dis_u2_entry_quirk)
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU2ENA;
	else
		reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}
static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
		u32 wIndex, int set)
{
	if ((wIndex & 0xff) != 0)
		return -EINVAL;

	if (!set)
		return -EINVAL;

	switch (wIndex >> 8) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		dwc->test_mode_nr = wIndex >> 8;
		dwc->test_mode = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int dwc3_ep0_handle_device(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	enum usb_device_state state;
	u32 wValue;
	u32 wIndex;
	int ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	state = dwc->gadget.state;

	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		break;
	/*
	 * 9.4.1 says only for SS, in AddressState only for
	 * default control pipe
	 */
	case USB_DEVICE_U1_ENABLE:
		ret = dwc3_ep0_handle_u1(dwc, state, set);
		break;
	case USB_DEVICE_U2_ENABLE:
		ret = dwc3_ep0_handle_u2(dwc, state, set);
		break;
	case USB_DEVICE_LTM_ENABLE:
		ret = -EINVAL;
		break;
	case USB_DEVICE_TEST_MODE:
		ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32 wValue;
	int ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_INTRF_FUNC_SUSPEND:
		/*
		 * REVISIT: Ideally we would enable some low power mode here,
		 * however it's unclear what we should be doing here.
		 *
		 * For now, we're not doing anything, just making sure we
		 * return 0 so USB Command Verifier tests pass without any
		 * errors.
		 */
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep *dep;
	u32 wValue;
	int ret;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_ENDPOINT_HALT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
			break;

		ret = __dwc3_gadget_ep_set_halt(dep, set, true);
		if (ret)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
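/*
 * SET_FEATURE / CLEAR_FEATURE dispatcher: @set selects between the two
 * requests, and the recipient bits of bRequestType pick the device,
 * interface or endpoint handler above.
 */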
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32 recip;
	int ret;

	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		ret = dwc3_ep0_handle_device(dwc, ctrl, set);
		break;
	case USB_RECIP_INTERFACE:
		ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
		break;
	case USB_RECIP_ENDPOINT:
		ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget.state;
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_err(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (state == USB_STATE_CONFIGURED) {
		dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	if (addr)
		usb_gadget_set_state(&dwc->gadget, USB_STATE_ADDRESS);
	else
		usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);

	return 0;
}
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&dwc->lock);
	ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);

	return ret;
}
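/*
 * SET_CONFIGURATION is delegated to the gadget/composite driver; on success
 * (or on USB_GADGET_DELAYED_STATUS with a non-zero configuration) the device
 * state and the U1/U2 acceptance bits in DCTL are updated accordingly.
 */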
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget.state;
	u32 cfg;
	int ret;
	u32 reg;

	cfg = le16_to_cpu(ctrl->wValue);

	switch (state) {
	case USB_STATE_DEFAULT:
		return -EINVAL;

	case USB_STATE_ADDRESS:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non-zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {

			/*
			 * only change state if set_config has already
			 * been processed. If gadget driver returns
			 * USB_GADGET_DELAYED_STATUS, we will wait
			 * to change the state on the next usb_ep_queue()
			 */
			if (ret == 0)
				usb_gadget_set_state(&dwc->gadget,
						USB_STATE_CONFIGURED);

			/*
			 * Enable transition to U1/U2 state when
			 * nothing is pending from application.
			 */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (!dwc->dis_u1_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU1ENA;
			if (!dwc->dis_u2_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU2ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
		}
		break;

	case USB_STATE_CONFIGURED:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg && !ret)
			usb_gadget_set_state(&dwc->gadget,
					USB_STATE_ADDRESS);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	u32 param = 0;
	u32 reg;

	struct timing {
		u8 u1sel;
		u8 u1pel;
		__le16 u2sel;
		__le16 u2pel;
	} __packed timing;

	int ret;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to Synopsys Databook, if parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}
static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	enum usb_device_state state = dwc->gadget.state;
	u16 wLength;

	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wLength = le16_to_cpu(ctrl->wLength);

	if (wLength != 6) {
		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
				wLength);
		return -EINVAL;
	}

	/*
	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
	 * queue a usb_request for 6 bytes.
	 *
	 * Remember, though, this controller can't handle non-wMaxPacketSize
	 * aligned transfers on the OUT direction, so we queue a request for
	 * wMaxPacketSize instead.
	 */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u16 wLength;
	u16 wValue;
	u16 wIndex;

	wValue = le16_to_cpu(ctrl->wValue);
	wLength = le16_to_cpu(ctrl->wLength);
	wIndex = le16_to_cpu(ctrl->wIndex);

	if (wIndex || wLength)
		return -EINVAL;

	dwc->gadget.isoch_delay = wValue;

	return 0;
}
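/* Dispatcher for standard (USB_TYPE_STANDARD) control requests. */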
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		ret = dwc3_ep0_set_sel(dwc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
		break;
	default:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}
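/*
 * Called from the Setup-phase XferComplete: decodes the eight-byte SETUP
 * packet sitting in the ep0 TRB buffer, records whether this is a two- or
 * three-stage transfer, and either handles the request here or delegates it
 * to the gadget driver. Any failure ends in a protocol STALL.
 */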
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
	int ret = -EINVAL;
	u32 len;

	if (!dwc->gadget_driver)
		goto out;

	trace_dwc3_ctrl_req(ctrl);

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

	if (ret >= 0)
		return;

out:
	dwc3_ep0_stall_and_restart(dwc);
}
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct usb_request *ur;
	struct dwc3_trb *trb;
	struct dwc3_ep *ep0;
	u32 transferred = 0;
	u32 status;
	u32 length;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(ep0, trb);

	r = next_request(&ep0->pending_list);
	if (!r)
		return;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		dwc->setup_packet_pending = true;

		dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	ur = &r->request;

	length = trb->size & DWC3_TRB_SIZE_MASK;
	transferred = ur->length - length;
	ur->actual += transferred;

	if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
	     ur->length && ur->zero) || dwc->ep0_bounced) {
		trb++;
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		trace_dwc3_complete_trb(ep0, trb);

		if (r->direction)
			dwc->eps[1]->trb_enqueue = 0;
		else
			dwc->eps[0]->trb_enqueue = 0;

		dwc->ep0_bounced = false;
	}

	if ((epnum & 1) && ur->actual < ur->length)
		dwc3_ep0_stall_and_restart(dwc);
	else
		dwc3_gadget_giveback(ep0, r, 0);
}
static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct dwc3_ep *dep;
	struct dwc3_trb *trb;
	u32 status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(dep, trb);

	if (!list_empty(&dep->pending_list)) {
		r = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_err(dwc->dev, "invalid test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dwc->setup_packet_pending = true;

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dep->resource_index = 0;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dwc3_ep0_complete_status(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}
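/*
 * Arms the Data phase. Transfers whose length is not wMaxPacketSize aligned
 * (and aligned transfers with the 'zero' flag set) get a chained second TRB
 * pointing at the bounce buffer so the controller always sees an aligned
 * transfer size.
 */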
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	int ret;

	req->direction = !!dep->number;

	if (req->request.length == 0) {
		dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA, false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
			&& (dep->number == 0)) {
		u32 maxpacket;
		u32 rem;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		maxpacket = dep->endpoint.maxpacket;
		rem = req->request.length % maxpacket;
		dwc->ep0_bounced = true;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 maxpacket - rem,
					 DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
		   req->request.length && req->request.zero) {

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 0, DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else {
		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
				false);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue];

		ret = dwc3_ep0_start_trans(dep);
	}

	WARN_ON(ret < 0);
}
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
		: DWC3_TRBCTL_CONTROL_STATUS2;

	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, type, false);
	return dwc3_ep0_start_trans(dep);
}

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	WARN_ON(dwc3_ep0_start_control_status(dep));
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	__dwc3_ep0_do_control_status(dwc, dep);
}
static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	if (!dep->resource_index)
		return;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
}
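/*
 * XferNotReady handling: the Data phase is always pre-armed, so DATA events
 * are only checked for a direction mismatch, while the Status phase is
 * started on demand here (honouring a gadget-requested delayed status).
 */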
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		/*
		 * We already have a DATA transfer in the controller's cache,
		 * if we receive a XferNotReady(DATA) we will ignore it, unless
		 * it's for the wrong direction.
		 *
		 * In that case, we must issue END_TRANSFER command to the Data
		 * Phase we already have started and issue SetStall on the
		 * control endpoint.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];

			dev_err(dwc->dev, "unexpected direction for Data Phase\n");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->delayed_status) {
			struct dwc3_ep *dep = dwc->eps[0];

			WARN_ON_ONCE(event->endpoint_number != 1);
			/*
			 * We should handle the delay STATUS phase here if the
			 * request for handling delay STATUS has been queued
			 * into the list.
			 */
			if (!list_empty(&dep->pending_list)) {
				dwc->delayed_status = false;
				usb_gadget_set_state(&dwc->gadget,
						USB_STATE_CONFIGURED);
				dwc3_ep0_do_control_status(dwc, event);
			}

			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_EPCMDCMPLT:
		break;
	}
}