Commit | Line | Data |
---|---|---|
72246da4 FB |
1 | /** |
2 | * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link | |
3 | * | |
4 | * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com | |
72246da4 FB |
5 | * |
6 | * Authors: Felipe Balbi <[email protected]>, | |
7 | * Sebastian Andrzej Siewior <[email protected]> | |
8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions, and the following disclaimer, | |
14 | * without modification. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | |
18 | * 3. The names of the above-listed copyright holders may not be used | |
19 | * to endorse or promote products derived from this software without | |
20 | * specific prior written permission. | |
21 | * | |
22 | * ALTERNATIVELY, this software may be distributed under the terms of the | |
23 | * GNU General Public License ("GPL") version 2, as published by the Free | |
24 | * Software Foundation. | |
25 | * | |
26 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS | |
27 | * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, | |
28 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
29 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR | |
30 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
31 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
32 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
33 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | |
34 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | |
35 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |
36 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
37 | */ | |
38 | ||
39 | #include <linux/kernel.h> | |
40 | #include <linux/delay.h> | |
41 | #include <linux/slab.h> | |
42 | #include <linux/spinlock.h> | |
43 | #include <linux/platform_device.h> | |
44 | #include <linux/pm_runtime.h> | |
45 | #include <linux/interrupt.h> | |
46 | #include <linux/io.h> | |
47 | #include <linux/list.h> | |
48 | #include <linux/dma-mapping.h> | |
49 | ||
50 | #include <linux/usb/ch9.h> | |
51 | #include <linux/usb/gadget.h> | |
52 | ||
53 | #include "core.h" | |
54 | #include "gadget.h" | |
55 | #include "io.h" | |
56 | ||
57 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) | |
58 | ||
04a9bfcd FB |
59 | /** |
60 | * dwc3_gadget_set_test_mode - Enables USB2 Test Modes | |
61 | * @dwc: pointer to our context structure | |
62 | * @mode: the mode to set (J, K, SE0 NAK, Force Enable) | 
63 | * | |
64 | * Caller should take care of locking. This function will | |
65 | * return 0 on success or -EINVAL if wrong Test Selector | |
66 | * is passed | |
67 | */ | |
68 | int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) | |
69 | { | |
70 | u32 reg; | |
71 | ||
72 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
73 | reg &= ~DWC3_DCTL_TSTCTRL_MASK; | |
74 | ||
75 | switch (mode) { | |
76 | case TEST_J: | |
77 | case TEST_K: | |
78 | case TEST_SE0_NAK: | |
79 | case TEST_PACKET: | |
80 | case TEST_FORCE_EN: | |
81 | reg |= mode << 1; | |
82 | break; | |
83 | default: | |
84 | return -EINVAL; | |
85 | } | |
86 | ||
87 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
88 | ||
89 | return 0; | |
90 | } | |
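Since dwc3_gadget_set_test_mode() leaves locking to the caller, a typical call site wraps it in the driver's spinlock. The following is only an illustrative sketch of such a caller; the function name is hypothetical and not part of this file:

/* Illustrative sketch only: a hypothetical caller entering Test_Packet mode */
static int dwc3_enter_test_packet(struct dwc3 *dwc)
{
	unsigned long	flags;
	int		ret;

	spin_lock_irqsave(&dwc->lock, flags);	/* helper expects caller locking */
	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;	/* 0 on success, -EINVAL for an unknown Test Selector */
}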
91 | ||
8598bde7 FB |
92 | /** |
93 | * dwc3_gadget_set_link_state - Sets USB Link to a particular State | |
94 | * @dwc: pointer to our context structure | |
95 | * @state: the state to put link into | |
96 | * | |
97 | * Caller should take care of locking. This function will | |
98 | * return 0 on success or -EINVAL. | |
99 | */ | |
100 | int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) | |
101 | { | |
102 | int retries = 100; | |
103 | u32 reg; | |
104 | ||
105 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
106 | reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; | |
107 | ||
108 | /* set requested state */ | |
109 | reg |= DWC3_DCTL_ULSTCHNGREQ(state); | |
110 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
111 | ||
112 | /* wait for a change in DSTS */ | |
113 | while (--retries) { | |
114 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
115 | ||
116 | /* in HS, means ON */ | |
117 | if (DWC3_DSTS_USBLNKST(reg) == state) | |
118 | return 0; | |
119 | ||
138801aa | 120 | udelay(500); |
8598bde7 FB |
121 | } |
122 | ||
123 | dev_vdbg(dwc->dev, "link state change request timed out\n"); | |
124 | ||
125 | return -ETIMEDOUT; | |
126 | } | |
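As with the test-mode helper, callers of dwc3_gadget_set_link_state() must hold dwc->lock themselves. A minimal sketch of a caller, assuming the same usage the remote-wakeup path makes of it further down in this file (the function name is hypothetical):

/* Illustrative sketch only: drive the link into Recovery under dwc->lock */
static int dwc3_link_recovery_sketch(struct dwc3 *dwc)
{
	int ret;

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret == -ETIMEDOUT)
		dev_err(dwc->dev, "link state change did not complete\n");

	return ret;
}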
127 | ||
72246da4 FB |
128 | void dwc3_map_buffer_to_dma(struct dwc3_request *req) |
129 | { | |
130 | struct dwc3 *dwc = req->dep->dwc; | |
131 | ||
78c58a53 SAS |
132 | if (req->request.length == 0) { |
133 | /* req->request.dma = dwc->setup_buf_addr; */ | |
134 | return; | |
135 | } | |
136 | ||
eeb720fb FB |
137 | if (req->request.num_sgs) { |
138 | int mapped; | |
139 | ||
140 | mapped = dma_map_sg(dwc->dev, req->request.sg, | |
141 | req->request.num_sgs, | |
142 | req->direction ? DMA_TO_DEVICE | |
143 | : DMA_FROM_DEVICE); | |
144 | if (mapped < 0) { | |
145 | dev_err(dwc->dev, "failed to map SGs\n"); | |
146 | return; | |
147 | } | |
148 | ||
149 | req->request.num_mapped_sgs = mapped; | |
150 | return; | |
151 | } | |
152 | ||
72246da4 FB |
153 | if (req->request.dma == DMA_ADDR_INVALID) { |
154 | req->request.dma = dma_map_single(dwc->dev, req->request.buf, | |
155 | req->request.length, req->direction | |
156 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | |
157 | req->mapped = true; | |
72246da4 FB |
158 | } |
159 | } | |
160 | ||
161 | void dwc3_unmap_buffer_from_dma(struct dwc3_request *req) | |
162 | { | |
163 | struct dwc3 *dwc = req->dep->dwc; | |
164 | ||
78c58a53 SAS |
165 | if (req->request.length == 0) { |
166 | req->request.dma = DMA_ADDR_INVALID; | |
167 | return; | |
168 | } | |
169 | ||
eeb720fb FB |
170 | if (req->request.num_mapped_sgs) { |
171 | req->request.dma = DMA_ADDR_INVALID; | |
172 | dma_unmap_sg(dwc->dev, req->request.sg, | |
c09d6b51 | 173 | req->request.num_mapped_sgs, |
eeb720fb FB |
174 | req->direction ? DMA_TO_DEVICE |
175 | : DMA_FROM_DEVICE); | |
176 | ||
177 | req->request.num_mapped_sgs = 0; | |
178 | return; | |
179 | } | |
180 | ||
72246da4 FB |
181 | if (req->mapped) { |
182 | dma_unmap_single(dwc->dev, req->request.dma, | |
183 | req->request.length, req->direction | |
184 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | |
185 | req->mapped = 0; | |
f198ead2 | 186 | req->request.dma = DMA_ADDR_INVALID; |
72246da4 FB |
187 | } |
188 | } | |
189 | ||
190 | void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, | |
191 | int status) | |
192 | { | |
193 | struct dwc3 *dwc = dep->dwc; | |
194 | ||
195 | if (req->queued) { | |
eeb720fb FB |
196 | if (req->request.num_mapped_sgs) |
197 | dep->busy_slot += req->request.num_mapped_sgs; | |
198 | else | |
199 | dep->busy_slot++; | |
200 | ||
72246da4 FB |
201 | /* |
202 | * Skip LINK TRB. We can't use req->trb and check for | |
203 | * DWC3_TRBCTL_LINK_TRB because it points the TRB we just | |
204 | * completed (not the LINK TRB). | |
205 | */ | |
206 | if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && | |
207 | usb_endpoint_xfer_isoc(dep->desc)) | |
208 | dep->busy_slot++; | |
209 | } | |
210 | list_del(&req->list); | |
eeb720fb | 211 | req->trb = NULL; |
72246da4 FB |
212 | |
213 | if (req->request.status == -EINPROGRESS) | |
214 | req->request.status = status; | |
215 | ||
216 | dwc3_unmap_buffer_from_dma(req); | |
217 | ||
218 | dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", | |
219 | req, dep->name, req->request.actual, | |
220 | req->request.length, status); | |
221 | ||
222 | spin_unlock(&dwc->lock); | |
223 | req->request.complete(&req->dep->endpoint, &req->request); | |
224 | spin_lock(&dwc->lock); | |
225 | } | |
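The busy_slot/free_slot bookkeeping above is plain ring-buffer arithmetic: free_slot advances when TRBs are prepared, busy_slot when they complete. The sketch below shows the free-TRB computation, the same expression dwc3_prepare_trbs() uses later in this file; it is included purely for illustration and is not part of the driver.

/* Illustrative sketch only: how many TRBs of the ring are still available */
static u32 dwc3_trbs_left_sketch(struct dwc3_ep *dep)
{
	/* DWC3_TRB_NUM is a power of two, so masking implements the wrap */
	return (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
}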
226 | ||
227 | static const char *dwc3_gadget_ep_cmd_string(u8 cmd) | |
228 | { | |
229 | switch (cmd) { | |
230 | case DWC3_DEPCMD_DEPSTARTCFG: | |
231 | return "Start New Configuration"; | |
232 | case DWC3_DEPCMD_ENDTRANSFER: | |
233 | return "End Transfer"; | |
234 | case DWC3_DEPCMD_UPDATETRANSFER: | |
235 | return "Update Transfer"; | |
236 | case DWC3_DEPCMD_STARTTRANSFER: | |
237 | return "Start Transfer"; | |
238 | case DWC3_DEPCMD_CLEARSTALL: | |
239 | return "Clear Stall"; | |
240 | case DWC3_DEPCMD_SETSTALL: | |
241 | return "Set Stall"; | |
242 | case DWC3_DEPCMD_GETSEQNUMBER: | |
243 | return "Get Data Sequence Number"; | |
244 | case DWC3_DEPCMD_SETTRANSFRESOURCE: | |
245 | return "Set Endpoint Transfer Resource"; | |
246 | case DWC3_DEPCMD_SETEPCONFIG: | |
247 | return "Set Endpoint Configuration"; | |
248 | default: | |
249 | return "UNKNOWN command"; | |
250 | } | |
251 | } | |
252 | ||
253 | int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, | |
254 | unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) | |
255 | { | |
256 | struct dwc3_ep *dep = dwc->eps[ep]; | |
61d58242 | 257 | u32 timeout = 500; |
72246da4 FB |
258 | u32 reg; |
259 | ||
260 | dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n", | |
261 | dep->name, | |
dc1c70a7 FB |
262 | dwc3_gadget_ep_cmd_string(cmd), params->param0, |
263 | params->param1, params->param2); | |
72246da4 | 264 | |
dc1c70a7 FB |
265 | dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0); |
266 | dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1); | |
267 | dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2); | |
72246da4 FB |
268 | |
269 | dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); | |
270 | do { | |
271 | reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); | |
272 | if (!(reg & DWC3_DEPCMD_CMDACT)) { | |
164f6e14 FB |
273 | dev_vdbg(dwc->dev, "Command Complete --> %d\n", |
274 | DWC3_DEPCMD_STATUS(reg)); | |
72246da4 FB |
275 | return 0; |
276 | } | |
277 | ||
278 | /* | |
72246da4 FB |
279 | * We can't sleep here, because it is also called from |
280 | * interrupt context. | |
281 | */ | |
282 | timeout--; | |
283 | if (!timeout) | |
284 | return -ETIMEDOUT; | |
285 | ||
61d58242 | 286 | udelay(1); |
72246da4 FB |
287 | } while (1); |
288 | } | |
289 | ||
290 | static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, | |
291 | struct dwc3_trb_hw *trb) | |
292 | { | |
c439ef87 | 293 | u32 offset = (char *) trb - (char *) dep->trb_pool; |
72246da4 FB |
294 | |
295 | return dep->trb_pool_dma + offset; | |
296 | } | |
297 | ||
298 | static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) | |
299 | { | |
300 | struct dwc3 *dwc = dep->dwc; | |
301 | ||
302 | if (dep->trb_pool) | |
303 | return 0; | |
304 | ||
305 | if (dep->number == 0 || dep->number == 1) | |
306 | return 0; | |
307 | ||
308 | dep->trb_pool = dma_alloc_coherent(dwc->dev, | |
309 | sizeof(struct dwc3_trb) * DWC3_TRB_NUM, | |
310 | &dep->trb_pool_dma, GFP_KERNEL); | |
311 | if (!dep->trb_pool) { | |
312 | dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", | |
313 | dep->name); | |
314 | return -ENOMEM; | |
315 | } | |
316 | ||
317 | return 0; | |
318 | } | |
319 | ||
320 | static void dwc3_free_trb_pool(struct dwc3_ep *dep) | |
321 | { | |
322 | struct dwc3 *dwc = dep->dwc; | |
323 | ||
324 | dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, | |
325 | dep->trb_pool, dep->trb_pool_dma); | |
326 | ||
327 | dep->trb_pool = NULL; | |
328 | dep->trb_pool_dma = 0; | |
329 | } | |
330 | ||
331 | static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) | |
332 | { | |
333 | struct dwc3_gadget_ep_cmd_params params; | |
334 | u32 cmd; | |
335 | ||
336 | memset(¶ms, 0x00, sizeof(params)); | |
337 | ||
338 | if (dep->number != 1) { | |
339 | cmd = DWC3_DEPCMD_DEPSTARTCFG; | |
340 | /* XferRscIdx == 0 for ep0 and 2 for the remaining */ | |
b23c8439 PZ |
341 | if (dep->number > 1) { |
342 | if (dwc->start_config_issued) | |
343 | return 0; | |
344 | dwc->start_config_issued = true; | |
72246da4 | 345 | cmd |= DWC3_DEPCMD_PARAM(2); |
b23c8439 | 346 | } |
72246da4 FB |
347 | |
348 | return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); | |
349 | } | |
350 | ||
351 | return 0; | |
352 | } | |
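A descriptive summary of the command issued above (an editor's note, not new driver behavior):

/*
 * DEPSTARTCFG rule implemented above:
 *  - physical endpoint 0: DEPSTARTCFG with XferRscIdx = 0, sent every time;
 *  - physical endpoint 1: no command is sent at all;
 *  - endpoints 2 and up: DEPSTARTCFG with XferRscIdx = 2, sent only once
 *    per configuration (guarded by dwc->start_config_issued).
 */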
353 | ||
354 | static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, | |
c90bfaec FB |
355 | const struct usb_endpoint_descriptor *desc, |
356 | const struct usb_ss_ep_comp_descriptor *comp_desc) | |
72246da4 FB |
357 | { |
358 | struct dwc3_gadget_ep_cmd_params params; | |
359 | ||
360 | memset(¶ms, 0x00, sizeof(params)); | |
361 | ||
dc1c70a7 FB |
362 | params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) |
363 | | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)) | |
364 | | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst); | |
72246da4 | 365 | |
dc1c70a7 FB |
366 | params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN |
367 | | DWC3_DEPCFG_XFER_NOT_READY_EN; | |
72246da4 | 368 | |
18b7ede5 | 369 | if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { |
dc1c70a7 FB |
370 | params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE |
371 | | DWC3_DEPCFG_STREAM_EVENT_EN; | |
879631aa FB |
372 | dep->stream_capable = true; |
373 | } | |
374 | ||
72246da4 | 375 | if (usb_endpoint_xfer_isoc(desc)) |
dc1c70a7 | 376 | params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; |
72246da4 FB |
377 | |
378 | /* | |
379 | * We are doing 1:1 mapping for endpoints, meaning | |
380 | * Physical Endpoint 2 maps to Logical Endpoint 2 and | 
381 | * so on. We consider the direction bit as part of the physical | |
382 | * endpoint number. So USB endpoint 0x81 is 0x03. | |
383 | */ | |
dc1c70a7 | 384 | params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number); |
72246da4 FB |
385 | |
386 | /* | |
387 | * We must use the lower 16 TX FIFOs even though | |
388 | * HW might have more | |
389 | */ | |
390 | if (dep->direction) | |
dc1c70a7 | 391 | params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); |
72246da4 FB |
392 | |
393 | if (desc->bInterval) { | |
dc1c70a7 | 394 | params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); |
72246da4 FB |
395 | dep->interval = 1 << (desc->bInterval - 1); |
396 | } | |
397 | ||
398 | return dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
399 | DWC3_DEPCMD_SETEPCONFIG, ¶ms); | |
400 | } | |
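The physical-endpoint numbering described in the comment above can be written out explicitly. The helper below is only an illustration of that mapping and is not used anywhere in the driver:

/* Illustrative sketch only: USB endpoint address -> physical endpoint number
 * (USB 0x81, i.e. EP1 IN, becomes physical endpoint 3; 0x02 becomes 4).
 */
static u8 dwc3_ep_addr_to_phys_sketch(u8 bEndpointAddress)
{
	u8 epnum = bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = !!(bEndpointAddress & USB_DIR_IN);

	return (epnum << 1) | is_in;	/* direction bit is the lowest bit */
}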
401 | ||
402 | static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) | |
403 | { | |
404 | struct dwc3_gadget_ep_cmd_params params; | |
405 | ||
406 | memset(¶ms, 0x00, sizeof(params)); | |
407 | ||
dc1c70a7 | 408 | params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); |
72246da4 FB |
409 | |
410 | return dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
411 | DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); | |
412 | } | |
413 | ||
414 | /** | |
415 | * __dwc3_gadget_ep_enable - Initializes a HW endpoint | |
416 | * @dep: endpoint to be initialized | |
417 | * @desc: USB Endpoint Descriptor | |
418 | * | |
419 | * Caller should take care of locking | |
420 | */ | |
421 | static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, | |
c90bfaec FB |
422 | const struct usb_endpoint_descriptor *desc, |
423 | const struct usb_ss_ep_comp_descriptor *comp_desc) | |
72246da4 FB |
424 | { |
425 | struct dwc3 *dwc = dep->dwc; | |
426 | u32 reg; | |
427 | int ret = -ENOMEM; | |
428 | ||
429 | if (!(dep->flags & DWC3_EP_ENABLED)) { | |
430 | ret = dwc3_gadget_start_config(dwc, dep); | |
431 | if (ret) | |
432 | return ret; | |
433 | } | |
434 | ||
c90bfaec | 435 | ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc); |
72246da4 FB |
436 | if (ret) |
437 | return ret; | |
438 | ||
439 | if (!(dep->flags & DWC3_EP_ENABLED)) { | |
440 | struct dwc3_trb_hw *trb_st_hw; | |
441 | struct dwc3_trb_hw *trb_link_hw; | |
442 | struct dwc3_trb trb_link; | |
443 | ||
444 | ret = dwc3_gadget_set_xfer_resource(dwc, dep); | |
445 | if (ret) | |
446 | return ret; | |
447 | ||
448 | dep->desc = desc; | |
c90bfaec | 449 | dep->comp_desc = comp_desc; |
72246da4 FB |
450 | dep->type = usb_endpoint_type(desc); |
451 | dep->flags |= DWC3_EP_ENABLED; | |
452 | ||
453 | reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); | |
454 | reg |= DWC3_DALEPENA_EP(dep->number); | |
455 | dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); | |
456 | ||
457 | if (!usb_endpoint_xfer_isoc(desc)) | |
458 | return 0; | |
459 | ||
460 | memset(&trb_link, 0, sizeof(trb_link)); | |
461 | ||
462 | /* Link TRB for ISOC. The HWO bit is never reset */ | 
463 | trb_st_hw = &dep->trb_pool[0]; | |
464 | ||
465 | trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw); | |
466 | trb_link.trbctl = DWC3_TRBCTL_LINK_TRB; | |
467 | trb_link.hwo = true; | |
468 | ||
469 | trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1]; | |
470 | dwc3_trb_to_hw(&trb_link, trb_link_hw); | |
471 | } | |
472 | ||
473 | return 0; | |
474 | } | |
475 | ||
624407f9 SAS |
476 | static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum); |
477 | static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) | |
72246da4 FB |
478 | { |
479 | struct dwc3_request *req; | |
480 | ||
624407f9 SAS |
481 | if (!list_empty(&dep->req_queued)) |
482 | dwc3_stop_active_transfer(dwc, dep->number); | |
483 | ||
72246da4 FB |
484 | while (!list_empty(&dep->request_list)) { |
485 | req = next_request(&dep->request_list); | |
486 | ||
624407f9 | 487 | dwc3_gadget_giveback(dep, req, -ESHUTDOWN); |
72246da4 | 488 | } |
72246da4 FB |
489 | } |
490 | ||
491 | /** | |
492 | * __dwc3_gadget_ep_disable - Disables a HW endpoint | |
493 | * @dep: the endpoint to disable | |
494 | * | |
624407f9 SAS |
495 | * This function also removes requests which are currently processed by the | 
496 | * hardware and those which are not yet scheduled. | |
497 | * Caller should take care of locking. | |
72246da4 | 498 | */ |
72246da4 FB |
499 | static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) |
500 | { | |
501 | struct dwc3 *dwc = dep->dwc; | |
502 | u32 reg; | |
503 | ||
624407f9 | 504 | dwc3_remove_requests(dwc, dep); |
72246da4 FB |
505 | |
506 | reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); | |
507 | reg &= ~DWC3_DALEPENA_EP(dep->number); | |
508 | dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); | |
509 | ||
879631aa | 510 | dep->stream_capable = false; |
72246da4 | 511 | dep->desc = NULL; |
c90bfaec | 512 | dep->comp_desc = NULL; |
72246da4 | 513 | dep->type = 0; |
879631aa | 514 | dep->flags = 0; |
72246da4 FB |
515 | |
516 | return 0; | |
517 | } | |
518 | ||
519 | /* -------------------------------------------------------------------------- */ | |
520 | ||
521 | static int dwc3_gadget_ep0_enable(struct usb_ep *ep, | |
522 | const struct usb_endpoint_descriptor *desc) | |
523 | { | |
524 | return -EINVAL; | |
525 | } | |
526 | ||
527 | static int dwc3_gadget_ep0_disable(struct usb_ep *ep) | |
528 | { | |
529 | return -EINVAL; | |
530 | } | |
531 | ||
532 | /* -------------------------------------------------------------------------- */ | |
533 | ||
534 | static int dwc3_gadget_ep_enable(struct usb_ep *ep, | |
535 | const struct usb_endpoint_descriptor *desc) | |
536 | { | |
537 | struct dwc3_ep *dep; | |
538 | struct dwc3 *dwc; | |
539 | unsigned long flags; | |
540 | int ret; | |
541 | ||
542 | if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { | |
543 | pr_debug("dwc3: invalid parameters\n"); | |
544 | return -EINVAL; | |
545 | } | |
546 | ||
547 | if (!desc->wMaxPacketSize) { | |
548 | pr_debug("dwc3: missing wMaxPacketSize\n"); | |
549 | return -EINVAL; | |
550 | } | |
551 | ||
552 | dep = to_dwc3_ep(ep); | |
553 | dwc = dep->dwc; | |
554 | ||
555 | switch (usb_endpoint_type(desc)) { | |
556 | case USB_ENDPOINT_XFER_CONTROL: | |
557 | strncat(dep->name, "-control", sizeof(dep->name)); | |
558 | break; | |
559 | case USB_ENDPOINT_XFER_ISOC: | |
560 | strncat(dep->name, "-isoc", sizeof(dep->name)); | |
561 | break; | |
562 | case USB_ENDPOINT_XFER_BULK: | |
563 | strncat(dep->name, "-bulk", sizeof(dep->name)); | |
564 | break; | |
565 | case USB_ENDPOINT_XFER_INT: | |
566 | strncat(dep->name, "-int", sizeof(dep->name)); | |
567 | break; | |
568 | default: | |
569 | dev_err(dwc->dev, "invalid endpoint transfer type\n"); | |
570 | } | |
571 | ||
572 | if (dep->flags & DWC3_EP_ENABLED) { | |
573 | dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", | |
574 | dep->name); | |
575 | return 0; | |
576 | } | |
577 | ||
578 | dev_vdbg(dwc->dev, "Enabling %s\n", dep->name); | |
579 | ||
580 | spin_lock_irqsave(&dwc->lock, flags); | |
c90bfaec | 581 | ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc); |
72246da4 FB |
582 | spin_unlock_irqrestore(&dwc->lock, flags); |
583 | ||
584 | return ret; | |
585 | } | |
586 | ||
587 | static int dwc3_gadget_ep_disable(struct usb_ep *ep) | |
588 | { | |
589 | struct dwc3_ep *dep; | |
590 | struct dwc3 *dwc; | |
591 | unsigned long flags; | |
592 | int ret; | |
593 | ||
594 | if (!ep) { | |
595 | pr_debug("dwc3: invalid parameters\n"); | |
596 | return -EINVAL; | |
597 | } | |
598 | ||
599 | dep = to_dwc3_ep(ep); | |
600 | dwc = dep->dwc; | |
601 | ||
602 | if (!(dep->flags & DWC3_EP_ENABLED)) { | |
603 | dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", | |
604 | dep->name); | |
605 | return 0; | |
606 | } | |
607 | ||
608 | snprintf(dep->name, sizeof(dep->name), "ep%d%s", | |
609 | dep->number >> 1, | |
610 | (dep->number & 1) ? "in" : "out"); | |
611 | ||
612 | spin_lock_irqsave(&dwc->lock, flags); | |
613 | ret = __dwc3_gadget_ep_disable(dep); | |
614 | spin_unlock_irqrestore(&dwc->lock, flags); | |
615 | ||
616 | return ret; | |
617 | } | |
618 | ||
619 | static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, | |
620 | gfp_t gfp_flags) | |
621 | { | |
622 | struct dwc3_request *req; | |
623 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
624 | struct dwc3 *dwc = dep->dwc; | |
625 | ||
626 | req = kzalloc(sizeof(*req), gfp_flags); | |
627 | if (!req) { | |
628 | dev_err(dwc->dev, "not enough memory\n"); | |
629 | return NULL; | |
630 | } | |
631 | ||
632 | req->epnum = dep->number; | |
633 | req->dep = dep; | |
634 | req->request.dma = DMA_ADDR_INVALID; | |
635 | ||
636 | return &req->request; | |
637 | } | |
638 | ||
639 | static void dwc3_gadget_ep_free_request(struct usb_ep *ep, | |
640 | struct usb_request *request) | |
641 | { | |
642 | struct dwc3_request *req = to_dwc3_request(request); | |
643 | ||
644 | kfree(req); | |
645 | } | |
646 | ||
c71fc37c FB |
647 | /** |
648 | * dwc3_prepare_one_trb - setup one TRB from one request | |
649 | * @dep: endpoint for which this request is prepared | |
650 | * @req: dwc3_request pointer | |
651 | */ | |
68e823e2 | 652 | static void dwc3_prepare_one_trb(struct dwc3_ep *dep, |
eeb720fb FB |
653 | struct dwc3_request *req, dma_addr_t dma, |
654 | unsigned length, unsigned last, unsigned chain) | |
c71fc37c | 655 | { |
eeb720fb | 656 | struct dwc3 *dwc = dep->dwc; |
c71fc37c FB |
657 | struct dwc3_trb_hw *trb_hw; |
658 | struct dwc3_trb trb; | |
659 | ||
660 | unsigned int cur_slot; | |
661 | ||
eeb720fb FB |
662 | dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n", |
663 | dep->name, req, (unsigned long long) dma, | |
664 | length, last ? " last" : "", | |
665 | chain ? " chain" : ""); | |
666 | ||
c71fc37c FB |
667 | trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; |
668 | cur_slot = dep->free_slot; | |
669 | dep->free_slot++; | |
670 | ||
671 | /* Skip the LINK-TRB on ISOC */ | |
672 | if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && | |
673 | usb_endpoint_xfer_isoc(dep->desc)) | |
68e823e2 | 674 | return; |
c71fc37c | 675 | |
c71fc37c | 676 | memset(&trb, 0, sizeof(trb)); |
eeb720fb FB |
677 | if (!req->trb) { |
678 | dwc3_gadget_move_request_queued(req); | |
679 | req->trb = trb_hw; | |
680 | req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw); | |
681 | } | |
c71fc37c FB |
682 | |
683 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
684 | trb.isp_imi = true; | |
685 | trb.csp = true; | |
686 | } else { | |
eeb720fb | 687 | trb.chn = chain; |
c71fc37c FB |
688 | trb.lst = last; |
689 | } | |
690 | ||
691 | if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable) | |
692 | trb.sid_sofn = req->request.stream_id; | |
693 | ||
694 | switch (usb_endpoint_type(dep->desc)) { | |
695 | case USB_ENDPOINT_XFER_CONTROL: | |
696 | trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP; | |
697 | break; | |
698 | ||
699 | case USB_ENDPOINT_XFER_ISOC: | |
700 | trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; | |
701 | ||
702 | /* IOC every DWC3_TRB_NUM / 4 so we can refill */ | |
703 | if (!(cur_slot % (DWC3_TRB_NUM / 4))) | |
704 | trb.ioc = last; | |
705 | break; | |
706 | ||
707 | case USB_ENDPOINT_XFER_BULK: | |
708 | case USB_ENDPOINT_XFER_INT: | |
709 | trb.trbctl = DWC3_TRBCTL_NORMAL; | |
710 | break; | |
711 | default: | |
712 | /* | |
713 | * This is only possible with faulty memory because we | |
714 | * checked it already :) | |
715 | */ | |
716 | BUG(); | |
717 | } | |
718 | ||
eeb720fb FB |
719 | trb.length = length; |
720 | trb.bplh = dma; | |
c71fc37c FB |
721 | trb.hwo = true; |
722 | ||
723 | dwc3_trb_to_hw(&trb, trb_hw); | |
c71fc37c FB |
724 | } |
725 | ||
72246da4 FB |
726 | /* |
727 | * dwc3_prepare_trbs - setup TRBs from requests | |
728 | * @dep: endpoint for which requests are being prepared | |
729 | * @starting: true if the endpoint is idle and no requests are queued. | |
730 | * | |
731 | * The function goes through the request list and sets up TRBs for the | 
732 | * transfers. The function returns once there are no more TRBs available or | 
733 | * it runs out of requests. | 
734 | */ | |
68e823e2 | 735 | static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) |
72246da4 | 736 | { |
68e823e2 | 737 | struct dwc3_request *req, *n; |
72246da4 | 738 | u32 trbs_left; |
c71fc37c | 739 | unsigned int last_one = 0; |
72246da4 FB |
740 | |
741 | BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); | |
742 | ||
743 | /* the first request must not be queued */ | |
744 | trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; | |
c71fc37c | 745 | |
72246da4 FB |
746 | /* |
747 | * If busy and free slots are equal, it is either full or empty. If we are | 
748 | * starting to process requests then we are empty. Otherwise we are | 
749 | * full and don't do anything | 
750 | */ | |
751 | if (!trbs_left) { | |
752 | if (!starting) | |
68e823e2 | 753 | return; |
72246da4 FB |
754 | trbs_left = DWC3_TRB_NUM; |
755 | /* | |
756 | * In case we start from scratch, we queue the ISOC requests | |
757 | * starting from slot 1. This is done because we use a ring | 
758 | * buffer and have no LST bit to stop us. Instead, we place | 
759 | * the IOC bit every TRB_NUM/4 TRBs. We try to avoid having an interrupt | 
760 | * after the first request so we start at slot 1 and have | 
761 | * 7 requests proceed before we hit the first IOC. | |
762 | * Other transfer types don't use the ring buffer and are | |
763 | * processed from the first TRB until the last one. Since we | |
764 | * don't wrap around we have to start at the beginning. | |
765 | */ | |
766 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
767 | dep->busy_slot = 1; | |
768 | dep->free_slot = 1; | |
769 | } else { | |
770 | dep->busy_slot = 0; | |
771 | dep->free_slot = 0; | |
772 | } | |
773 | } | |
774 | ||
775 | /* The last TRB is a link TRB, not used for xfer */ | |
776 | if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc)) | |
68e823e2 | 777 | return; |
72246da4 FB |
778 | |
779 | list_for_each_entry_safe(req, n, &dep->request_list, list) { | |
eeb720fb FB |
780 | unsigned length; |
781 | dma_addr_t dma; | |
72246da4 | 782 | |
eeb720fb FB |
783 | if (req->request.num_mapped_sgs > 0) { |
784 | struct usb_request *request = &req->request; | |
785 | struct scatterlist *sg = request->sg; | |
786 | struct scatterlist *s; | |
787 | int i; | |
72246da4 | 788 | |
eeb720fb FB |
789 | for_each_sg(sg, s, request->num_mapped_sgs, i) { |
790 | unsigned chain = true; | |
72246da4 | 791 | |
eeb720fb FB |
792 | length = sg_dma_len(s); |
793 | dma = sg_dma_address(s); | |
72246da4 | 794 | |
eeb720fb FB |
795 | if (i == (request->num_mapped_sgs - 1) |
796 | || sg_is_last(s)) { | |
797 | last_one = true; | |
798 | chain = false; | |
799 | } | |
72246da4 | 800 | |
eeb720fb FB |
801 | trbs_left--; |
802 | if (!trbs_left) | |
803 | last_one = true; | |
72246da4 | 804 | |
eeb720fb FB |
805 | if (last_one) |
806 | chain = false; | |
72246da4 | 807 | |
eeb720fb FB |
808 | dwc3_prepare_one_trb(dep, req, dma, length, |
809 | last_one, chain); | |
72246da4 | 810 | |
eeb720fb FB |
811 | if (last_one) |
812 | break; | |
813 | } | |
72246da4 | 814 | } else { |
eeb720fb FB |
815 | dma = req->request.dma; |
816 | length = req->request.length; | |
817 | trbs_left--; | |
72246da4 | 818 | |
eeb720fb FB |
819 | if (!trbs_left) |
820 | last_one = 1; | |
879631aa | 821 | |
eeb720fb FB |
822 | /* Is this the last request? */ |
823 | if (list_is_last(&req->list, &dep->request_list)) | |
824 | last_one = 1; | |
72246da4 | 825 | |
eeb720fb FB |
826 | dwc3_prepare_one_trb(dep, req, dma, length, |
827 | last_one, false); | |
72246da4 | 828 | |
eeb720fb FB |
829 | if (last_one) |
830 | break; | |
72246da4 | 831 | } |
72246da4 | 832 | } |
72246da4 FB |
833 | } |
834 | ||
835 | static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, | |
836 | int start_new) | |
837 | { | |
838 | struct dwc3_gadget_ep_cmd_params params; | |
839 | struct dwc3_request *req; | |
840 | struct dwc3 *dwc = dep->dwc; | |
841 | int ret; | |
842 | u32 cmd; | |
843 | ||
844 | if (start_new && (dep->flags & DWC3_EP_BUSY)) { | |
845 | dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name); | |
846 | return -EBUSY; | |
847 | } | |
848 | dep->flags &= ~DWC3_EP_PENDING_REQUEST; | |
849 | ||
850 | /* | |
851 | * If we are getting here after a short-out-packet we don't enqueue any | |
852 | * new requests as we try to set the IOC bit only on the last request. | |
853 | */ | |
854 | if (start_new) { | |
855 | if (list_empty(&dep->req_queued)) | |
856 | dwc3_prepare_trbs(dep, start_new); | |
857 | ||
858 | /* req points to the first request which will be sent */ | |
859 | req = next_request(&dep->req_queued); | |
860 | } else { | |
68e823e2 FB |
861 | dwc3_prepare_trbs(dep, start_new); |
862 | ||
72246da4 FB |
863 | /* |
864 | * req points to the first request where HWO changed | |
865 | * from 0 to 1 | |
866 | */ | |
68e823e2 | 867 | req = next_request(&dep->req_queued); |
72246da4 FB |
868 | } |
869 | if (!req) { | |
870 | dep->flags |= DWC3_EP_PENDING_REQUEST; | |
871 | return 0; | |
872 | } | |
873 | ||
874 | memset(¶ms, 0, sizeof(params)); | |
dc1c70a7 FB |
875 | params.param0 = upper_32_bits(req->trb_dma); |
876 | params.param1 = lower_32_bits(req->trb_dma); | |
72246da4 FB |
877 | |
878 | if (start_new) | |
879 | cmd = DWC3_DEPCMD_STARTTRANSFER; | |
880 | else | |
881 | cmd = DWC3_DEPCMD_UPDATETRANSFER; | |
882 | ||
883 | cmd |= DWC3_DEPCMD_PARAM(cmd_param); | |
884 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); | |
885 | if (ret < 0) { | |
886 | dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); | |
887 | ||
888 | /* | |
889 | * FIXME we need to iterate over the list of requests | |
890 | * here and stop, unmap, free and del each of the linked | |
891 | * requests instead of what we do now. | 
892 | */ | |
893 | dwc3_unmap_buffer_from_dma(req); | |
894 | list_del(&req->list); | |
895 | return ret; | |
896 | } | |
897 | ||
898 | dep->flags |= DWC3_EP_BUSY; | |
899 | dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc, | |
900 | dep->number); | |
25b8ff68 FB |
901 | |
902 | WARN_ON_ONCE(!dep->res_trans_idx); | |
903 | ||
72246da4 FB |
904 | return 0; |
905 | } | |
906 | ||
907 | static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) | |
908 | { | |
909 | req->request.actual = 0; | |
910 | req->request.status = -EINPROGRESS; | |
911 | req->direction = dep->direction; | |
912 | req->epnum = dep->number; | |
913 | ||
914 | /* | |
915 | * We only add to our list of requests now and | |
916 | * start consuming the list once we get XferNotReady | |
917 | * IRQ. | |
918 | * | |
919 | * That way, we avoid doing anything that we don't need | |
920 | * to do now and defer it until the point we receive a | |
921 | * particular token from the Host side. | |
922 | * | |
923 | * This will also avoid Host cancelling URBs due to too | |
924 | * many NAKs. | 
925 | */ | |
926 | dwc3_map_buffer_to_dma(req); | |
927 | list_add_tail(&req->list, &dep->request_list); | |
928 | ||
929 | /* | |
930 | * There is one special case: XferNotReady with | |
931 | * empty list of requests. We need to kick the | |
932 | * transfer here in that situation, otherwise | |
933 | * we will be NAKing forever. | |
934 | * | |
935 | * If we get XferNotReady before gadget driver | |
936 | * has a chance to queue a request, we will ACK | |
937 | * the IRQ but won't be able to receive the data | |
938 | * until the next request is queued. The following | |
939 | * code is handling exactly that. | |
940 | */ | |
941 | if (dep->flags & DWC3_EP_PENDING_REQUEST) { | |
942 | int ret; | |
943 | int start_trans; | |
944 | ||
945 | start_trans = 1; | |
7b7dd025 | 946 | if (usb_endpoint_xfer_isoc(dep->desc) && |
72246da4 FB |
947 | dep->flags & DWC3_EP_BUSY) |
948 | start_trans = 0; | |
949 | ||
950 | ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans); | |
951 | if (ret && ret != -EBUSY) { | |
952 | struct dwc3 *dwc = dep->dwc; | |
953 | ||
954 | dev_dbg(dwc->dev, "%s: failed to kick transfers\n", | |
955 | dep->name); | |
956 | } | |
957 | }; | |
958 | ||
959 | return 0; | |
960 | } | |
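From a gadget/function driver's point of view this path is reached through usb_ep_queue(); the transfer is normally started later, when the XferNotReady event arrives (or right away if one is already pending, as handled above). A minimal, purely illustrative caller with hypothetical names:

/* Illustrative sketch only: queueing a request from a function driver */
static void sketch_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual were filled in by dwc3_gadget_giveback() */
}

static int sketch_queue_one(struct usb_ep *ep, struct usb_request *req,
		void *buf, unsigned length)
{
	req->buf	= buf;
	req->length	= length;
	req->complete	= sketch_complete;

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}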
961 | ||
962 | static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, | |
963 | gfp_t gfp_flags) | |
964 | { | |
965 | struct dwc3_request *req = to_dwc3_request(request); | |
966 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
967 | struct dwc3 *dwc = dep->dwc; | |
968 | ||
969 | unsigned long flags; | |
970 | ||
971 | int ret; | |
972 | ||
973 | if (!dep->desc) { | |
974 | dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", | |
975 | request, ep->name); | |
976 | return -ESHUTDOWN; | |
977 | } | |
978 | ||
979 | dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", | |
980 | request, ep->name, request->length); | |
981 | ||
982 | spin_lock_irqsave(&dwc->lock, flags); | |
983 | ret = __dwc3_gadget_ep_queue(dep, req); | |
984 | spin_unlock_irqrestore(&dwc->lock, flags); | |
985 | ||
986 | return ret; | |
987 | } | |
988 | ||
989 | static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, | |
990 | struct usb_request *request) | |
991 | { | |
992 | struct dwc3_request *req = to_dwc3_request(request); | |
993 | struct dwc3_request *r = NULL; | |
994 | ||
995 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
996 | struct dwc3 *dwc = dep->dwc; | |
997 | ||
998 | unsigned long flags; | |
999 | int ret = 0; | |
1000 | ||
1001 | spin_lock_irqsave(&dwc->lock, flags); | |
1002 | ||
1003 | list_for_each_entry(r, &dep->request_list, list) { | |
1004 | if (r == req) | |
1005 | break; | |
1006 | } | |
1007 | ||
1008 | if (r != req) { | |
1009 | list_for_each_entry(r, &dep->req_queued, list) { | |
1010 | if (r == req) | |
1011 | break; | |
1012 | } | |
1013 | if (r == req) { | |
1014 | /* wait until it is processed */ | |
1015 | dwc3_stop_active_transfer(dwc, dep->number); | |
1016 | goto out0; | |
1017 | } | |
1018 | dev_err(dwc->dev, "request %p was not queued to %s\n", | |
1019 | request, ep->name); | |
1020 | ret = -EINVAL; | |
1021 | goto out0; | |
1022 | } | |
1023 | ||
1024 | /* giveback the request */ | |
1025 | dwc3_gadget_giveback(dep, req, -ECONNRESET); | |
1026 | ||
1027 | out0: | |
1028 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1029 | ||
1030 | return ret; | |
1031 | } | |
1032 | ||
1033 | int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) | |
1034 | { | |
1035 | struct dwc3_gadget_ep_cmd_params params; | |
1036 | struct dwc3 *dwc = dep->dwc; | |
1037 | int ret; | |
1038 | ||
1039 | memset(¶ms, 0x00, sizeof(params)); | |
1040 | ||
1041 | if (value) { | |
0b7836a9 FB |
1042 | if (dep->number == 0 || dep->number == 1) { |
1043 | /* | |
1044 | * Whenever EP0 is stalled, we will restart | |
1045 | * the state machine, thus moving back to | |
1046 | * Setup Phase | |
1047 | */ | |
1048 | dwc->ep0state = EP0_SETUP_PHASE; | |
1049 | } | |
72246da4 FB |
1050 | |
1051 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
1052 | DWC3_DEPCMD_SETSTALL, ¶ms); | |
1053 | if (ret) | |
1054 | dev_err(dwc->dev, "failed to %s STALL on %s\n", | |
1055 | value ? "set" : "clear", | |
1056 | dep->name); | |
1057 | else | |
1058 | dep->flags |= DWC3_EP_STALL; | |
1059 | } else { | |
5275455a PZ |
1060 | if (dep->flags & DWC3_EP_WEDGE) |
1061 | return 0; | |
1062 | ||
72246da4 FB |
1063 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, |
1064 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | |
1065 | if (ret) | |
1066 | dev_err(dwc->dev, "failed to %s STALL on %s\n", | |
1067 | value ? "set" : "clear", | |
1068 | dep->name); | |
1069 | else | |
1070 | dep->flags &= ~DWC3_EP_STALL; | |
1071 | } | |
5275455a | 1072 | |
72246da4 FB |
1073 | return ret; |
1074 | } | |
1075 | ||
1076 | static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) | |
1077 | { | |
1078 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
1079 | struct dwc3 *dwc = dep->dwc; | |
1080 | ||
1081 | unsigned long flags; | |
1082 | ||
1083 | int ret; | |
1084 | ||
1085 | spin_lock_irqsave(&dwc->lock, flags); | |
1086 | ||
1087 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
1088 | dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); | |
1089 | ret = -EINVAL; | |
1090 | goto out; | |
1091 | } | |
1092 | ||
1093 | ret = __dwc3_gadget_ep_set_halt(dep, value); | |
1094 | out: | |
1095 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1096 | ||
1097 | return ret; | |
1098 | } | |
1099 | ||
1100 | static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) | |
1101 | { | |
1102 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
1103 | ||
1104 | dep->flags |= DWC3_EP_WEDGE; | |
1105 | ||
5275455a | 1106 | return dwc3_gadget_ep_set_halt(ep, 1); |
72246da4 FB |
1107 | } |
1108 | ||
1109 | /* -------------------------------------------------------------------------- */ | |
1110 | ||
1111 | static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { | |
1112 | .bLength = USB_DT_ENDPOINT_SIZE, | |
1113 | .bDescriptorType = USB_DT_ENDPOINT, | |
1114 | .bmAttributes = USB_ENDPOINT_XFER_CONTROL, | |
1115 | }; | |
1116 | ||
1117 | static const struct usb_ep_ops dwc3_gadget_ep0_ops = { | |
1118 | .enable = dwc3_gadget_ep0_enable, | |
1119 | .disable = dwc3_gadget_ep0_disable, | |
1120 | .alloc_request = dwc3_gadget_ep_alloc_request, | |
1121 | .free_request = dwc3_gadget_ep_free_request, | |
1122 | .queue = dwc3_gadget_ep0_queue, | |
1123 | .dequeue = dwc3_gadget_ep_dequeue, | |
1124 | .set_halt = dwc3_gadget_ep_set_halt, | |
1125 | .set_wedge = dwc3_gadget_ep_set_wedge, | |
1126 | }; | |
1127 | ||
1128 | static const struct usb_ep_ops dwc3_gadget_ep_ops = { | |
1129 | .enable = dwc3_gadget_ep_enable, | |
1130 | .disable = dwc3_gadget_ep_disable, | |
1131 | .alloc_request = dwc3_gadget_ep_alloc_request, | |
1132 | .free_request = dwc3_gadget_ep_free_request, | |
1133 | .queue = dwc3_gadget_ep_queue, | |
1134 | .dequeue = dwc3_gadget_ep_dequeue, | |
1135 | .set_halt = dwc3_gadget_ep_set_halt, | |
1136 | .set_wedge = dwc3_gadget_ep_set_wedge, | |
1137 | }; | |
1138 | ||
1139 | /* -------------------------------------------------------------------------- */ | |
1140 | ||
1141 | static int dwc3_gadget_get_frame(struct usb_gadget *g) | |
1142 | { | |
1143 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1144 | u32 reg; | |
1145 | ||
1146 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
1147 | return DWC3_DSTS_SOFFN(reg); | |
1148 | } | |
1149 | ||
1150 | static int dwc3_gadget_wakeup(struct usb_gadget *g) | |
1151 | { | |
1152 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1153 | ||
1154 | unsigned long timeout; | |
1155 | unsigned long flags; | |
1156 | ||
1157 | u32 reg; | |
1158 | ||
1159 | int ret = 0; | |
1160 | ||
1161 | u8 link_state; | |
1162 | u8 speed; | |
1163 | ||
1164 | spin_lock_irqsave(&dwc->lock, flags); | |
1165 | ||
1166 | /* | |
1167 | * According to the Databook, the Remote Wakeup request should | 
1168 | * be issued only when the device is in early suspend state. | |
1169 | * | |
1170 | * We can check that via USB Link State bits in DSTS register. | |
1171 | */ | |
1172 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
1173 | ||
1174 | speed = reg & DWC3_DSTS_CONNECTSPD; | |
1175 | if (speed == DWC3_DSTS_SUPERSPEED) { | |
1176 | dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); | |
1177 | ret = -EINVAL; | |
1178 | goto out; | |
1179 | } | |
1180 | ||
1181 | link_state = DWC3_DSTS_USBLNKST(reg); | |
1182 | ||
1183 | switch (link_state) { | |
1184 | case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ | |
1185 | case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ | |
1186 | break; | |
1187 | default: | |
1188 | dev_dbg(dwc->dev, "can't wakeup from link state %d\n", | |
1189 | link_state); | |
1190 | ret = -EINVAL; | |
1191 | goto out; | |
1192 | } | |
1193 | ||
8598bde7 FB |
1194 | ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); |
1195 | if (ret < 0) { | |
1196 | dev_err(dwc->dev, "failed to put link in Recovery\n"); | |
1197 | goto out; | |
1198 | } | |
72246da4 FB |
1199 | |
1200 | /* write zeroes to Link Change Request */ | |
1201 | reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; | |
1202 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1203 | ||
1204 | /* poll until Link State changes to ON */ | 
1205 | timeout = jiffies + msecs_to_jiffies(100); | |
1206 | ||
1207 | while (!(time_after(jiffies, timeout))) { | |
1208 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
1209 | ||
1210 | /* in HS, means ON */ | |
1211 | if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) | |
1212 | break; | |
1213 | } | |
1214 | ||
1215 | if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { | |
1216 | dev_err(dwc->dev, "failed to send remote wakeup\n"); | |
1217 | ret = -EINVAL; | |
1218 | } | |
1219 | ||
1220 | out: | |
1221 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1222 | ||
1223 | return ret; | |
1224 | } | |
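Function drivers do not call this directly; they go through the gadget API, which reaches this function via the ops table below. A hedged sketch, with a hypothetical caller name:

/* Illustrative sketch only: requesting resume signalling from a function
 * driver. Only meaningful while the link is in U3/early suspend, as the
 * checks above enforce.
 */
static int sketch_remote_wakeup(struct usb_gadget *gadget)
{
	return usb_gadget_wakeup(gadget);	/* lands in dwc3_gadget_wakeup() */
}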
1225 | ||
1226 | static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, | |
1227 | int is_selfpowered) | |
1228 | { | |
1229 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1230 | ||
1231 | dwc->is_selfpowered = !!is_selfpowered; | |
1232 | ||
1233 | return 0; | |
1234 | } | |
1235 | ||
1236 | static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) | |
1237 | { | |
1238 | u32 reg; | |
61d58242 | 1239 | u32 timeout = 500; |
72246da4 FB |
1240 | |
1241 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1242 | if (is_on) | |
1243 | reg |= DWC3_DCTL_RUN_STOP; | |
1244 | else | |
1245 | reg &= ~DWC3_DCTL_RUN_STOP; | |
1246 | ||
1247 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1248 | ||
1249 | do { | |
1250 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
1251 | if (is_on) { | |
1252 | if (!(reg & DWC3_DSTS_DEVCTRLHLT)) | |
1253 | break; | |
1254 | } else { | |
1255 | if (reg & DWC3_DSTS_DEVCTRLHLT) | |
1256 | break; | |
1257 | } | |
72246da4 FB |
1258 | timeout--; |
1259 | if (!timeout) | |
1260 | break; | |
61d58242 | 1261 | udelay(1); |
72246da4 FB |
1262 | } while (1); |
1263 | ||
1264 | dev_vdbg(dwc->dev, "gadget %s data soft-%s\n", | |
1265 | dwc->gadget_driver | |
1266 | ? dwc->gadget_driver->function : "no-function", | |
1267 | is_on ? "connect" : "disconnect"); | |
1268 | } | |
1269 | ||
1270 | static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) | |
1271 | { | |
1272 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1273 | unsigned long flags; | |
1274 | ||
1275 | is_on = !!is_on; | |
1276 | ||
1277 | spin_lock_irqsave(&dwc->lock, flags); | |
1278 | dwc3_gadget_run_stop(dwc, is_on); | |
1279 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1280 | ||
1281 | return 0; | |
1282 | } | |
1283 | ||
1284 | static int dwc3_gadget_start(struct usb_gadget *g, | |
1285 | struct usb_gadget_driver *driver) | |
1286 | { | |
1287 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1288 | struct dwc3_ep *dep; | |
1289 | unsigned long flags; | |
1290 | int ret = 0; | |
1291 | u32 reg; | |
1292 | ||
1293 | spin_lock_irqsave(&dwc->lock, flags); | |
1294 | ||
1295 | if (dwc->gadget_driver) { | |
1296 | dev_err(dwc->dev, "%s is already bound to %s\n", | |
1297 | dwc->gadget.name, | |
1298 | dwc->gadget_driver->driver.name); | |
1299 | ret = -EBUSY; | |
1300 | goto err0; | |
1301 | } | |
1302 | ||
1303 | dwc->gadget_driver = driver; | |
1304 | dwc->gadget.dev.driver = &driver->driver; | |
1305 | ||
72246da4 FB |
1306 | reg = dwc3_readl(dwc->regs, DWC3_DCFG); |
1307 | reg &= ~(DWC3_DCFG_SPEED_MASK); | |
6c167fc9 | 1308 | reg |= dwc->maximum_speed; |
72246da4 FB |
1309 | dwc3_writel(dwc->regs, DWC3_DCFG, reg); |
1310 | ||
b23c8439 PZ |
1311 | dwc->start_config_issued = false; |
1312 | ||
72246da4 FB |
1313 | /* Start with SuperSpeed Default */ |
1314 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); | |
1315 | ||
1316 | dep = dwc->eps[0]; | |
c90bfaec | 1317 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); |
72246da4 FB |
1318 | if (ret) { |
1319 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1320 | goto err0; | |
1321 | } | |
1322 | ||
1323 | dep = dwc->eps[1]; | |
c90bfaec | 1324 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); |
72246da4 FB |
1325 | if (ret) { |
1326 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1327 | goto err1; | |
1328 | } | |
1329 | ||
1330 | /* begin to receive SETUP packets */ | |
c7fcdeb2 | 1331 | dwc->ep0state = EP0_SETUP_PHASE; |
72246da4 FB |
1332 | dwc3_ep0_out_start(dwc); |
1333 | ||
1334 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1335 | ||
1336 | return 0; | |
1337 | ||
1338 | err1: | |
1339 | __dwc3_gadget_ep_disable(dwc->eps[0]); | |
1340 | ||
1341 | err0: | |
1342 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1343 | ||
1344 | return ret; | |
1345 | } | |
1346 | ||
1347 | static int dwc3_gadget_stop(struct usb_gadget *g, | |
1348 | struct usb_gadget_driver *driver) | |
1349 | { | |
1350 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1351 | unsigned long flags; | |
1352 | ||
1353 | spin_lock_irqsave(&dwc->lock, flags); | |
1354 | ||
1355 | __dwc3_gadget_ep_disable(dwc->eps[0]); | |
1356 | __dwc3_gadget_ep_disable(dwc->eps[1]); | |
1357 | ||
1358 | dwc->gadget_driver = NULL; | |
1359 | dwc->gadget.dev.driver = NULL; | |
1360 | ||
1361 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1362 | ||
1363 | return 0; | |
1364 | } | |
1365 | static const struct usb_gadget_ops dwc3_gadget_ops = { | |
1366 | .get_frame = dwc3_gadget_get_frame, | |
1367 | .wakeup = dwc3_gadget_wakeup, | |
1368 | .set_selfpowered = dwc3_gadget_set_selfpowered, | |
1369 | .pullup = dwc3_gadget_pullup, | |
1370 | .udc_start = dwc3_gadget_start, | |
1371 | .udc_stop = dwc3_gadget_stop, | |
1372 | }; | |
1373 | ||
1374 | /* -------------------------------------------------------------------------- */ | |
1375 | ||
1376 | static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc) | |
1377 | { | |
1378 | struct dwc3_ep *dep; | |
1379 | u8 epnum; | |
1380 | ||
1381 | INIT_LIST_HEAD(&dwc->gadget.ep_list); | |
1382 | ||
1383 | for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1384 | dep = kzalloc(sizeof(*dep), GFP_KERNEL); | |
1385 | if (!dep) { | |
1386 | dev_err(dwc->dev, "can't allocate endpoint %d\n", | |
1387 | epnum); | |
1388 | return -ENOMEM; | |
1389 | } | |
1390 | ||
1391 | dep->dwc = dwc; | |
1392 | dep->number = epnum; | |
1393 | dwc->eps[epnum] = dep; | |
1394 | ||
1395 | snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, | |
1396 | (epnum & 1) ? "in" : "out"); | |
1397 | dep->endpoint.name = dep->name; | |
1398 | dep->direction = (epnum & 1); | |
1399 | ||
1400 | if (epnum == 0 || epnum == 1) { | |
1401 | dep->endpoint.maxpacket = 512; | |
1402 | dep->endpoint.ops = &dwc3_gadget_ep0_ops; | |
1403 | if (!epnum) | |
1404 | dwc->gadget.ep0 = &dep->endpoint; | |
1405 | } else { | |
1406 | int ret; | |
1407 | ||
1408 | dep->endpoint.maxpacket = 1024; | |
12d36c16 | 1409 | dep->endpoint.max_streams = 15; |
72246da4 FB |
1410 | dep->endpoint.ops = &dwc3_gadget_ep_ops; |
1411 | list_add_tail(&dep->endpoint.ep_list, | |
1412 | &dwc->gadget.ep_list); | |
1413 | ||
1414 | ret = dwc3_alloc_trb_pool(dep); | |
25b8ff68 | 1415 | if (ret) |
72246da4 | 1416 | return ret; |
72246da4 | 1417 | } |
25b8ff68 | 1418 | |
72246da4 FB |
1419 | INIT_LIST_HEAD(&dep->request_list); |
1420 | INIT_LIST_HEAD(&dep->req_queued); | |
1421 | } | |
1422 | ||
1423 | return 0; | |
1424 | } | |
1425 | ||
1426 | static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) | |
1427 | { | |
1428 | struct dwc3_ep *dep; | |
1429 | u8 epnum; | |
1430 | ||
1431 | for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1432 | dep = dwc->eps[epnum]; | |
1433 | dwc3_free_trb_pool(dep); | |
1434 | ||
1435 | if (epnum != 0 && epnum != 1) | |
1436 | list_del(&dep->endpoint.ep_list); | |
1437 | ||
1438 | kfree(dep); | |
1439 | } | |
1440 | } | |
1441 | ||
1442 | static void dwc3_gadget_release(struct device *dev) | |
1443 | { | |
1444 | dev_dbg(dev, "%s\n", __func__); | |
1445 | } | |
1446 | ||
1447 | /* -------------------------------------------------------------------------- */ | |
1448 | static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |
1449 | const struct dwc3_event_depevt *event, int status) | |
1450 | { | |
1451 | struct dwc3_request *req; | |
1452 | struct dwc3_trb trb; | |
1453 | unsigned int count; | |
1454 | unsigned int s_pkt = 0; | |
1455 | ||
1456 | do { | |
1457 | req = next_request(&dep->req_queued); | |
d39ee7be SAS |
1458 | if (!req) { |
1459 | WARN_ON_ONCE(1); | |
1460 | return 1; | |
1461 | } | |
72246da4 FB |
1462 | |
1463 | dwc3_trb_to_nat(req->trb, &trb); | |
1464 | ||
0d2f4758 SAS |
1465 | if (trb.hwo && status != -ESHUTDOWN) |
1466 | /* | |
1467 | * We continue despite the error. There is not much we | 
1468 | * can do. If we don't clean it up we loop forever. If | 
1469 | * we skip the TRB then it gets overwritten and reused after | 
1470 | * a while since we use them in a ring buffer. A BUG() | 
1471 | * would help. Let's hope that if this occurs, someone | 
1472 | * fixes the root cause instead of looking away :) | |
1473 | */ | |
72246da4 FB |
1474 | dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", |
1475 | dep->name, req->trb); | |
72246da4 FB |
1476 | count = trb.length; |
1477 | ||
1478 | if (dep->direction) { | |
1479 | if (count) { | |
1480 | dev_err(dwc->dev, "incomplete IN transfer %s\n", | |
1481 | dep->name); | |
1482 | status = -ECONNRESET; | |
1483 | } | |
1484 | } else { | |
1485 | if (count && (event->status & DEPEVT_STATUS_SHORT)) | |
1486 | s_pkt = 1; | |
1487 | } | |
1488 | ||
1489 | /* | |
1490 | * We assume here we will always receive the entire data block | |
1491 | * which we should receive. Meaning, if we program RX to | |
1492 | * receive 4K but we receive only 2K, we assume that's all we | |
1493 | * should receive and we simply bounce the request back to the | |
1494 | * gadget driver for further processing. | |
1495 | */ | |
1496 | req->request.actual += req->request.length - count; | |
1497 | dwc3_gadget_giveback(dep, req, status); | |
1498 | if (s_pkt) | |
1499 | break; | |
1500 | if ((event->status & DEPEVT_STATUS_LST) && trb.lst) | |
1501 | break; | |
1502 | if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc) | |
1503 | break; | |
1504 | } while (1); | |
1505 | ||
1506 | if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc) | |
1507 | return 0; | |
1508 | return 1; | |
1509 | } | |
1510 | ||
1511 | static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, | |
1512 | struct dwc3_ep *dep, const struct dwc3_event_depevt *event, | |
1513 | int start_new) | |
1514 | { | |
1515 | unsigned status = 0; | |
1516 | int clean_busy; | |
1517 | ||
1518 | if (event->status & DEPEVT_STATUS_BUSERR) | |
1519 | status = -ECONNRESET; | |
1520 | ||
1521 | clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); | |
a1ae9be5 | 1522 | if (clean_busy) { |
72246da4 | 1523 | dep->flags &= ~DWC3_EP_BUSY; |
a1ae9be5 SAS |
1524 | dep->res_trans_idx = 0; |
1525 | } | |
fae2b904 FB |
1526 | |
1527 | /* | |
1528 | * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. | |
1529 | * See dwc3_gadget_linksts_change_interrupt() for 1st half. | |
1530 | */ | |
1531 | if (dwc->revision < DWC3_REVISION_183A) { | |
1532 | u32 reg; | |
1533 | int i; | |
1534 | ||
1535 | for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { | |
1536 | struct dwc3_ep *dep = dwc->eps[i]; | |
1537 | ||
1538 | if (!(dep->flags & DWC3_EP_ENABLED)) | |
1539 | continue; | |
1540 | ||
1541 | if (!list_empty(&dep->req_queued)) | |
1542 | return; | |
1543 | } | |
1544 | ||
1545 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1546 | reg |= dwc->u1u2; | |
1547 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1548 | ||
1549 | dwc->u1u2 = 0; | |
1550 | } | |
72246da4 FB |
1551 | } |
1552 | ||
1553 | static void dwc3_gadget_start_isoc(struct dwc3 *dwc, | |
1554 | struct dwc3_ep *dep, const struct dwc3_event_depevt *event) | |
1555 | { | |
1556 | u32 uf; | |
1557 | ||
1558 | if (list_empty(&dep->request_list)) { | |
1559 | dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", | |
1560 | dep->name); | |
1561 | return; | |
1562 | } | |
1563 | ||
1564 | if (event->parameters) { | |
1565 | u32 mask; | |
1566 | ||
1567 | mask = ~(dep->interval - 1); | |
1568 | uf = event->parameters & mask; | |
1569 | /* 4 micro frames in the future */ | |
1570 | uf += dep->interval * 4; | |
1571 | } else { | |
1572 | uf = 0; | |
1573 | } | |
1574 | ||
1575 | __dwc3_gadget_kick_transfer(dep, uf, 1); | |
1576 | } | |
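The microframe arithmetic above is easier to see with concrete numbers; the following is only a worked example added for illustration, not code used by the driver.

/*
 * Worked example for the uFrame computation above, assuming
 * dep->interval = 8 and event->parameters = 0x1234:
 *
 *	mask = ~(8 - 1)        = 0xfffffff8
 *	uf   = 0x1234 & mask   = 0x1230
 *	uf  += 8 * 4           = 0x1250
 *
 * i.e. the transfer is scheduled four intervals into the future.
 */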
1577 | ||
1578 | static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep, | |
1579 | const struct dwc3_event_depevt *event) | |
1580 | { | |
1581 | struct dwc3 *dwc = dep->dwc; | |
1582 | struct dwc3_event_depevt mod_ev = *event; | |
1583 | ||
1584 | /* | |
1585 | * We were asked to remove one request. It is possible that this | 
1586 | * request and a few others were started together and have the same | 
1587 | * transfer index. Since we stopped the whole endpoint we don't | 
1588 | * know how many requests were already completed (and not yet | 
1589 | * reported) and how many could still be completed (later). We purge them all until | 
1590 | * the end of the list. | |
1591 | */ | |
1592 | mod_ev.status = DEPEVT_STATUS_LST; | |
1593 | dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN); | |
1594 | dep->flags &= ~DWC3_EP_BUSY; | |
1595 | /* pending requests are ignored and are queued on XferNotReady */ | 
72246da4 FB |
1596 | } |
1597 | ||
1598 | static void dwc3_ep_cmd_compl(struct dwc3_ep *dep, | |
1599 | const struct dwc3_event_depevt *event) | |
1600 | { | |
1601 | u32 param = event->parameters; | |
1602 | u32 cmd_type = (param >> 8) & ((1 << 5) - 1); | |
1603 | ||
1604 | switch (cmd_type) { | |
1605 | case DWC3_DEPCMD_ENDTRANSFER: | |
1606 | dwc3_process_ep_cmd_complete(dep, event); | |
1607 | break; | |
1608 | case DWC3_DEPCMD_STARTTRANSFER: | |
1609 | dep->res_trans_idx = param & 0x7f; | |
1610 | break; | |
1611 | default: | |
1612 | printk(KERN_ERR "%s() unknown /unexpected type: %d\n", | |
1613 | __func__, cmd_type); | |
1614 | break; | |
1615 | }; | |
1616 | } | |
1617 | ||
1618 | static void dwc3_endpoint_interrupt(struct dwc3 *dwc, | |
1619 | const struct dwc3_event_depevt *event) | |
1620 | { | |
1621 | struct dwc3_ep *dep; | |
1622 | u8 epnum = event->endpoint_number; | |
1623 | ||
1624 | dep = dwc->eps[epnum]; | |
1625 | ||
1626 | dev_vdbg(dwc->dev, "%s: %s\n", dep->name, | |
1627 | dwc3_ep_event_string(event->endpoint_event)); | |
1628 | ||
1629 | if (epnum == 0 || epnum == 1) { | |
1630 | dwc3_ep0_interrupt(dwc, event); | |
1631 | return; | |
1632 | } | |
1633 | ||
1634 | switch (event->endpoint_event) { | |
1635 | case DWC3_DEPEVT_XFERCOMPLETE: | |
1636 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
1637 | dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", | |
1638 | dep->name); | |
1639 | return; | |
1640 | } | |
1641 | ||
1642 | dwc3_endpoint_transfer_complete(dwc, dep, event, 1); | |
1643 | break; | |
1644 | case DWC3_DEPEVT_XFERINPROGRESS: | |
1645 | if (!usb_endpoint_xfer_isoc(dep->desc)) { | |
1646 | dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n", | |
1647 | dep->name); | |
1648 | return; | |
1649 | } | |
1650 | ||
1651 | dwc3_endpoint_transfer_complete(dwc, dep, event, 0); | |
1652 | break; | |
1653 | case DWC3_DEPEVT_XFERNOTREADY: | |
1654 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
1655 | dwc3_gadget_start_isoc(dwc, dep, event); | |
1656 | } else { | |
1657 | int ret; | |
1658 | ||
1659 | dev_vdbg(dwc->dev, "%s: reason %s\n", | |
40aa41fb FB |
1660 | dep->name, event->status & |
1661 | DEPEVT_STATUS_TRANSFER_ACTIVE | |
72246da4 FB |
1662 | ? "Transfer Active" |
1663 | : "Transfer Not Active"); | |
1664 | ||
1665 | ret = __dwc3_gadget_kick_transfer(dep, 0, 1); | |
1666 | if (!ret || ret == -EBUSY) | |
1667 | return; | |
1668 | ||
1669 | dev_dbg(dwc->dev, "%s: failed to kick transfers\n", | |
1670 | dep->name); | |
1671 | } | |
1672 | ||
879631aa FB |
1673 | break; |
1674 | case DWC3_DEPEVT_STREAMEVT: | |
1675 | if (!usb_endpoint_xfer_bulk(dep->desc)) { | |
1676 | dev_err(dwc->dev, "Stream event for non-Bulk %s\n", | |
1677 | dep->name); | |
1678 | return; | |
1679 | } | |
1680 | ||
1681 | switch (event->status) { | |
1682 | case DEPEVT_STREAMEVT_FOUND: | |
1683 | dev_vdbg(dwc->dev, "Stream %d found and started\n", | |
1684 | event->parameters); | |
1685 | ||
1686 | break; | |
1687 | case DEPEVT_STREAMEVT_NOTFOUND: | |
1688 | /* FALLTHROUGH */ | |
1689 | default: | |
1690 | dev_dbg(dwc->dev, "Couldn't find suitable stream\n"); | |
1691 | } | |
72246da4 FB |
1692 | break; |
1693 | case DWC3_DEPEVT_RXTXFIFOEVT: | |
1694 | dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); | |
1695 | break; | |
72246da4 FB |
1696 | case DWC3_DEPEVT_EPCMDCMPLT: |
1697 | dwc3_ep_cmd_compl(dep, event); | |
1698 | break; | |
1699 | } | |
1700 | } | |
1701 | ||
1702 | static void dwc3_disconnect_gadget(struct dwc3 *dwc) | |
1703 | { | |
1704 | if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { | |
1705 | spin_unlock(&dwc->lock); | |
1706 | dwc->gadget_driver->disconnect(&dwc->gadget); | |
1707 | spin_lock(&dwc->lock); | |
1708 | } | |
1709 | } | |
1710 | ||
1711 | static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum) | |
1712 | { | |
1713 | struct dwc3_ep *dep; | |
1714 | struct dwc3_gadget_ep_cmd_params params; | |
1715 | u32 cmd; | |
1716 | int ret; | |
1717 | ||
1718 | dep = dwc->eps[epnum]; | |
1719 | ||
624407f9 | 1720 | WARN_ON(!dep->res_trans_idx); |
72246da4 FB |
1721 | if (dep->res_trans_idx) { |
1722 | cmd = DWC3_DEPCMD_ENDTRANSFER; | |
1723 | cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC; | |
1724 | cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx); | |
1725 | memset(¶ms, 0, sizeof(params)); | |
1726 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); | |
1727 | WARN_ON_ONCE(ret); | |
a1ae9be5 | 1728 | dep->res_trans_idx = 0; |
72246da4 FB |
1729 | } |
1730 | } | |
1731 | ||
1732 | static void dwc3_stop_active_transfers(struct dwc3 *dwc) | |
1733 | { | |
1734 | u32 epnum; | |
1735 | ||
1736 | for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1737 | struct dwc3_ep *dep; | |
1738 | ||
1739 | dep = dwc->eps[epnum]; | |
1740 | if (!(dep->flags & DWC3_EP_ENABLED)) | |
1741 | continue; | |
1742 | ||
624407f9 | 1743 | dwc3_remove_requests(dwc, dep); |
72246da4 FB |
1744 | } |
1745 | } | |
1746 | ||
1747 | static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) | |
1748 | { | |
1749 | u32 epnum; | |
1750 | ||
1751 | for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1752 | struct dwc3_ep *dep; | |
1753 | struct dwc3_gadget_ep_cmd_params params; | |
1754 | int ret; | |
1755 | ||
1756 | dep = dwc->eps[epnum]; | |
1757 | ||
1758 | if (!(dep->flags & DWC3_EP_STALL)) | |
1759 | continue; | |
1760 | ||
1761 | dep->flags &= ~DWC3_EP_STALL; | |
1762 | ||
1763 | memset(¶ms, 0, sizeof(params)); | |
1764 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
1765 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | |
1766 | WARN_ON_ONCE(ret); | |
1767 | } | |
1768 | } | |
1769 | ||
1770 | static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) | |
1771 | { | |
1772 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1773 | #if 0 | |
1774 | XXX | |
1775 | U1/U2 is a power-saving optimization. Skip it for now; anyway, we need |
1776 | to enable it before we can disable it. |
1777 | ||
1778 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1779 | reg &= ~DWC3_DCTL_INITU1ENA; | |
1780 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1781 | ||
1782 | reg &= ~DWC3_DCTL_INITU2ENA; | |
1783 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1784 | #endif | |
1785 | ||
1786 | dwc3_stop_active_transfers(dwc); | |
1787 | dwc3_disconnect_gadget(dwc); | |
b23c8439 | 1788 | dwc->start_config_issued = false; |
72246da4 FB |
1789 | |
1790 | dwc->gadget.speed = USB_SPEED_UNKNOWN; | |
df62df56 | 1791 | dwc->setup_packet_pending = false; |
72246da4 FB |
1792 | } |
1793 | ||
1794 | static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on) | |
1795 | { | |
1796 | u32 reg; | |
1797 | ||
1798 | reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); | |
1799 | ||
1800 | if (on) | |
1801 | reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; | |
1802 | else | |
1803 | reg |= DWC3_GUSB3PIPECTL_SUSPHY; | |
1804 | ||
1805 | dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); | |
1806 | } | |
1807 | ||
1808 | static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on) | |
1809 | { | |
1810 | u32 reg; | |
1811 | ||
1812 | reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); | |
1813 | ||
1814 | if (on) | |
1815 | reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; | |
1816 | else | |
1817 | reg |= DWC3_GUSB2PHYCFG_SUSPHY; | |
1818 | ||
1819 | dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); | |
1820 | } | |
1821 | ||
1822 | static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) | |
1823 | { | |
1824 | u32 reg; | |
1825 | ||
1826 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1827 | ||
df62df56 FB |
1828 | /* |
1829 | * WORKAROUND: DWC3 revisions <1.88a have an issue which | |
1830 | * would cause a missing Disconnect Event if there's a | |
1831 | * pending Setup Packet in the FIFO. | |
1832 | * | |
1833 | * There's no suggested workaround on the official Bug | |
1834 | * report, which states that "unless the driver/application | |
1835 | * is doing any special handling of a disconnect event, | |
1836 | * there is no functional issue". | |
1837 | * | |
1838 | * Unfortunately, it turns out that we _do_ some special | |
1839 | * handling of a disconnect event, namely complete all | |
1840 | * pending transfers, notify gadget driver of the | |
1841 | * disconnection, and so on. | |
1842 | * | |
1843 | * Our suggested workaround is to follow the Disconnect | |
1844 | * Event steps here, instead, based on a setup_packet_pending | |
1845 | * flag. Such flag gets set whenever we have a XferNotReady | |
1846 | * event on EP0 and gets cleared on XferComplete for the | |
1847 | * same endpoint. | |
1848 | * | |
1849 | * Refers to: | |
1850 | * | |
1851 | * STAR#9000466709: RTL: Device : Disconnect event not | |
1852 | * generated if setup packet pending in FIFO | |
1853 | */ | |
1854 | if (dwc->revision < DWC3_REVISION_188A) { | |
1855 | if (dwc->setup_packet_pending) | |
1856 | dwc3_gadget_disconnect_interrupt(dwc); | |
1857 | } | |
1858 | ||
961906ed FB |
1859 | /* after reset -> Default State */ |
1860 | dwc->dev_state = DWC3_DEFAULT_STATE; | |
1861 | ||
72246da4 FB |
1862 | /* Enable PHYs */ |
1863 | dwc3_gadget_usb2_phy_power(dwc, true); | |
1864 | dwc3_gadget_usb3_phy_power(dwc, true); | |
1865 | ||
1866 | if (dwc->gadget.speed != USB_SPEED_UNKNOWN) | |
1867 | dwc3_disconnect_gadget(dwc); | |
1868 | ||
1869 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1870 | reg &= ~DWC3_DCTL_TSTCTRL_MASK; | |
1871 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1872 | ||
1873 | dwc3_stop_active_transfers(dwc); | |
1874 | dwc3_clear_stall_all_ep(dwc); | |
b23c8439 | 1875 | dwc->start_config_issued = false; |
72246da4 FB |
1876 | |
1877 | /* Reset device address to zero */ | |
1878 | reg = dwc3_readl(dwc->regs, DWC3_DCFG); | |
1879 | reg &= ~(DWC3_DCFG_DEVADDR_MASK); | |
1880 | dwc3_writel(dwc->regs, DWC3_DCFG, reg); | |
72246da4 FB |
1881 | } |
1882 | ||
1883 | static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) | |
1884 | { | |
1885 | u32 reg; | |
1886 | u32 usb30_clock = DWC3_GCTL_CLK_BUS; | |
1887 | ||
1888 | /* | |
1889 | * We change the clock only at SuperSpeed. The benefit is not yet |
1890 | * clear; it may become part of the power-saving plan. |
1891 | */ | |
1892 | ||
1893 | if (speed != DWC3_DSTS_SUPERSPEED) | |
1894 | return; | |
1895 | ||
1896 | /* | |
1897 | * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed | |
1898 | * each time on Connect Done. | |
1899 | */ | |
1900 | if (!usb30_clock) | |
1901 | return; | |
1902 | ||
1903 | reg = dwc3_readl(dwc->regs, DWC3_GCTL); | |
1904 | reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); | |
1905 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); | |
1906 | } | |
1907 | ||
1908 | static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed) | |
1909 | { | |
1910 | switch (speed) { | |
1911 | case USB_SPEED_SUPER: | |
1912 | dwc3_gadget_usb2_phy_power(dwc, false); | |
1913 | break; | |
1914 | case USB_SPEED_HIGH: | |
1915 | case USB_SPEED_FULL: | |
1916 | case USB_SPEED_LOW: | |
1917 | dwc3_gadget_usb3_phy_power(dwc, false); | |
1918 | break; | |
1919 | } | |
1920 | } | |
1921 | ||
1922 | static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) | |
1923 | { | |
1924 | struct dwc3_gadget_ep_cmd_params params; | |
1925 | struct dwc3_ep *dep; | |
1926 | int ret; | |
1927 | u32 reg; | |
1928 | u8 speed; | |
1929 | ||
1930 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1931 | ||
1932 | memset(¶ms, 0x00, sizeof(params)); | |
1933 | ||
72246da4 FB |
1934 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); |
1935 | speed = reg & DWC3_DSTS_CONNECTSPD; | |
1936 | dwc->speed = speed; | |
1937 | ||
1938 | dwc3_update_ram_clk_sel(dwc, speed); | |
1939 | ||
1940 | switch (speed) { | |
1941 | case DWC3_DCFG_SUPERSPEED: | |
05870c5b FB |
1942 | /* |
1943 | * WORKAROUND: DWC3 revisions <1.90a have an issue which | |
1944 | * would cause a missing USB3 Reset event. | |
1945 | * | |
1946 | * In such situations, we should force a USB3 Reset | |
1947 | * event by calling our dwc3_gadget_reset_interrupt() | |
1948 | * routine. | |
1949 | * | |
1950 | * Refers to: | |
1951 | * | |
1952 | * STAR#9000483510: RTL: SS : USB3 reset event may | |
1953 | * not be generated always when the link enters poll | |
1954 | */ | |
1955 | if (dwc->revision < DWC3_REVISION_190A) | |
1956 | dwc3_gadget_reset_interrupt(dwc); | |
1957 | ||
72246da4 FB |
1958 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); |
1959 | dwc->gadget.ep0->maxpacket = 512; | |
1960 | dwc->gadget.speed = USB_SPEED_SUPER; | |
1961 | break; | |
1962 | case DWC3_DCFG_HIGHSPEED: | |
1963 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); | |
1964 | dwc->gadget.ep0->maxpacket = 64; | |
1965 | dwc->gadget.speed = USB_SPEED_HIGH; | |
1966 | break; | |
1967 | case DWC3_DCFG_FULLSPEED2: | |
1968 | case DWC3_DCFG_FULLSPEED1: | |
1969 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); | |
1970 | dwc->gadget.ep0->maxpacket = 64; | |
1971 | dwc->gadget.speed = USB_SPEED_FULL; | |
1972 | break; | |
1973 | case DWC3_DCFG_LOWSPEED: | |
1974 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); | |
1975 | dwc->gadget.ep0->maxpacket = 8; | |
1976 | dwc->gadget.speed = USB_SPEED_LOW; | |
1977 | break; | |
1978 | } | |
1979 | ||
1980 | /* Disable the unneeded PHY */ |
1981 | dwc3_gadget_disable_phy(dwc, dwc->gadget.speed); | |
1982 | ||
1983 | dep = dwc->eps[0]; | |
c90bfaec | 1984 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); |
72246da4 FB |
1985 | if (ret) { |
1986 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1987 | return; | |
1988 | } | |
1989 | ||
1990 | dep = dwc->eps[1]; | |
c90bfaec | 1991 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL); |
72246da4 FB |
1992 | if (ret) { |
1993 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1994 | return; | |
1995 | } | |
1996 | ||
1997 | /* | |
1998 | * Configure PHY via GUSB3PIPECTLn if required. | |
1999 | * | |
2000 | * Update GTXFIFOSIZn | |
2001 | * | |
2002 | * In both cases reset values should be sufficient. | |
2003 | */ | |
2004 | } | |
2005 | ||
2006 | static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) | |
2007 | { | |
2008 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
2009 | ||
2010 | /* | |
2011 | * TODO take core out of low power mode when that's | |
2012 | * implemented. | |
2013 | */ | |
2014 | ||
2015 | if (dwc->gadget_driver && dwc->gadget_driver->resume) dwc->gadget_driver->resume(&dwc->gadget); |
2016 | } | |
2017 | ||
2018 | static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, | |
2019 | unsigned int evtinfo) | |
2020 | { | |
fae2b904 FB |
2021 | enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; |
2022 | ||
2023 | /* | |
2024 | * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending | |
2025 | * on the link partner, the USB session might do multiple entry/exit | |
2026 | * of low power states before a transfer takes place. | |
2027 | * | |
2028 | * Due to this problem, we might experience lower throughput. The | |
2029 | * suggested workaround is to disable DCTL[12:9] bits if we're | |
2030 | * transitioning from U1/U2 to U0 and enable those bits again | |
2031 | * after a transfer completes and there are no pending transfers | |
2032 | * on any of the enabled endpoints. | |
2033 | * | |
2034 | * This is the first half of that workaround. | |
2035 | * | |
2036 | * Refers to: | |
2037 | * | |
2038 | * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us | |
2039 | * core send LGO_Ux entering U0 | |
2040 | */ | |
2041 | if (dwc->revision < DWC3_REVISION_183A) { | |
2042 | if (next == DWC3_LINK_STATE_U0) { | |
2043 | u32 u1u2; | |
2044 | u32 reg; | |
2045 | ||
2046 | switch (dwc->link_state) { | |
2047 | case DWC3_LINK_STATE_U1: | |
2048 | case DWC3_LINK_STATE_U2: | |
2049 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
2050 | u1u2 = reg & (DWC3_DCTL_INITU2ENA | |
2051 | | DWC3_DCTL_ACCEPTU2ENA | |
2052 | | DWC3_DCTL_INITU1ENA | |
2053 | | DWC3_DCTL_ACCEPTU1ENA); | |
2054 | ||
2055 | if (!dwc->u1u2) | |
2056 | dwc->u1u2 = reg & u1u2; | |
2057 | ||
2058 | reg &= ~u1u2; | |
2059 | ||
2060 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
2061 | break; | |
2062 | default: | |
2063 | /* do nothing */ | |
2064 | break; | |
2065 | } | |
2066 | } | |
2067 | } | |
2068 | ||
2069 | dwc->link_state = next; | |
019ac832 FB |
2070 | |
2071 | dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state); | |
72246da4 FB |
2072 | } |
2073 | ||
2074 | static void dwc3_gadget_interrupt(struct dwc3 *dwc, | |
2075 | const struct dwc3_event_devt *event) | |
2076 | { | |
2077 | switch (event->type) { | |
2078 | case DWC3_DEVICE_EVENT_DISCONNECT: | |
2079 | dwc3_gadget_disconnect_interrupt(dwc); | |
2080 | break; | |
2081 | case DWC3_DEVICE_EVENT_RESET: | |
2082 | dwc3_gadget_reset_interrupt(dwc); | |
2083 | break; | |
2084 | case DWC3_DEVICE_EVENT_CONNECT_DONE: | |
2085 | dwc3_gadget_conndone_interrupt(dwc); | |
2086 | break; | |
2087 | case DWC3_DEVICE_EVENT_WAKEUP: | |
2088 | dwc3_gadget_wakeup_interrupt(dwc); | |
2089 | break; | |
2090 | case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: | |
2091 | dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); | |
2092 | break; | |
2093 | case DWC3_DEVICE_EVENT_EOPF: | |
2094 | dev_vdbg(dwc->dev, "End of Periodic Frame\n"); | |
2095 | break; | |
2096 | case DWC3_DEVICE_EVENT_SOF: | |
2097 | dev_vdbg(dwc->dev, "Start of Periodic Frame\n"); | |
2098 | break; | |
2099 | case DWC3_DEVICE_EVENT_ERRATIC_ERROR: | |
2100 | dev_vdbg(dwc->dev, "Erratic Error\n"); | |
2101 | break; | |
2102 | case DWC3_DEVICE_EVENT_CMD_CMPL: | |
2103 | dev_vdbg(dwc->dev, "Command Complete\n"); | |
2104 | break; | |
2105 | case DWC3_DEVICE_EVENT_OVERFLOW: | |
2106 | dev_vdbg(dwc->dev, "Overflow\n"); | |
2107 | break; | |
2108 | default: | |
2109 | dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type); | |
2110 | } | |
2111 | } | |
2112 | ||
2113 | static void dwc3_process_event_entry(struct dwc3 *dwc, | |
2114 | const union dwc3_event *event) | |
2115 | { | |
2116 | /* Endpoint IRQ, handle it and return early */ | |
2117 | if (event->type.is_devspec == 0) { | |
2118 | /* depevt */ | |
2119 | return dwc3_endpoint_interrupt(dwc, &event->depevt); | |
2120 | } | |
2121 | ||
2122 | switch (event->type.type) { | |
2123 | case DWC3_EVENT_TYPE_DEV: | |
2124 | dwc3_gadget_interrupt(dwc, &event->devt); | |
2125 | break; | |
2126 | /* REVISIT what to do with Carkit and I2C events ? */ | |
2127 | default: | |
2128 | dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); | |
2129 | } | |
2130 | } | |
2131 | ||
2132 | static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) | |
2133 | { | |
2134 | struct dwc3_event_buffer *evt; | |
2135 | int left; | |
2136 | u32 count; | |
2137 | ||
2138 | count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); | |
2139 | count &= DWC3_GEVNTCOUNT_MASK; | |
2140 | if (!count) | |
2141 | return IRQ_NONE; | |
2142 | ||
2143 | evt = dwc->ev_buffs[buf]; | |
2144 | left = count; | |
2145 | ||
2146 | while (left > 0) { | |
2147 | union dwc3_event event; | |
2148 | ||
2149 | memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw)); | |
2150 | dwc3_process_event_entry(dwc, &event); | |
2151 | /* | |
2152 | * XXX we advance by 4 bytes, which is correct for almost all |
2153 | * entries since they are 4 bytes in size. There is one entry type |
2154 | * which takes 12 bytes: a regular entry followed by 8 bytes of |
2155 | * data. It is not yet clear how such an entry is laid out near the |
2156 | * buffer boundary; handle that once we actually need to process it. |
2157 | */ | |
2158 | evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; | |
2159 | left -= 4; | |
2160 | ||
2161 | dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); | |
2162 | } | |
2163 | ||
2164 | return IRQ_HANDLED; | |
2165 | } | |
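/*
 * Illustrative sketch only, not part of the driver: the read-position
 * bookkeeping used in dwc3_process_event_buf(). The ring is consumed
 * in 4-byte steps, the position wraps modulo the buffer size, and each
 * consumed entry is acknowledged by writing 4 to GEVNTCOUNT. The
 * 4096-byte size below is an assumption for the example only.
 */
static u32 event_pos_advance_sketch(u32 lpos)
{
	u32 buf_size = 4096;		/* hypothetical event buffer size */

	return (lpos + 4) % buf_size;	/* e.g. (4092 + 4) % 4096 == 0 */
}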
2166 | ||
2167 | static irqreturn_t dwc3_interrupt(int irq, void *_dwc) | |
2168 | { | |
2169 | struct dwc3 *dwc = _dwc; | |
2170 | int i; | |
2171 | irqreturn_t ret = IRQ_NONE; | |
2172 | ||
2173 | spin_lock(&dwc->lock); | |
2174 | ||
9f622b2a | 2175 | for (i = 0; i < dwc->num_event_buffers; i++) { |
72246da4 FB |
2176 | irqreturn_t status; |
2177 | ||
2178 | status = dwc3_process_event_buf(dwc, i); | |
2179 | if (status == IRQ_HANDLED) | |
2180 | ret = status; | |
2181 | } | |
2182 | ||
2183 | spin_unlock(&dwc->lock); | |
2184 | ||
2185 | return ret; | |
2186 | } | |
2187 | ||
2188 | /** | |
2189 | * dwc3_gadget_init - Initializes gadget related registers | |
2190 | * @dwc: Pointer to our controller context structure |
2191 | * | |
2192 | * Returns 0 on success, a negative errno otherwise. |
2193 | */ | |
2194 | int __devinit dwc3_gadget_init(struct dwc3 *dwc) | |
2195 | { | |
2196 | u32 reg; | |
2197 | int ret; | |
2198 | int irq; | |
2199 | ||
2200 | dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), | |
2201 | &dwc->ctrl_req_addr, GFP_KERNEL); | |
2202 | if (!dwc->ctrl_req) { | |
2203 | dev_err(dwc->dev, "failed to allocate ctrl request\n"); | |
2204 | ret = -ENOMEM; | |
2205 | goto err0; | |
2206 | } | |
2207 | ||
2208 | dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), | |
2209 | &dwc->ep0_trb_addr, GFP_KERNEL); | |
2210 | if (!dwc->ep0_trb) { | |
2211 | dev_err(dwc->dev, "failed to allocate ep0 trb\n"); | |
2212 | ret = -ENOMEM; | |
2213 | goto err1; | |
2214 | } | |
2215 | ||
2216 | dwc->setup_buf = dma_alloc_coherent(dwc->dev, | |
2217 | sizeof(*dwc->setup_buf) * 2, | |
2218 | &dwc->setup_buf_addr, GFP_KERNEL); | |
2219 | if (!dwc->setup_buf) { | |
2220 | dev_err(dwc->dev, "failed to allocate setup buffer\n"); | |
2221 | ret = -ENOMEM; | |
2222 | goto err2; | |
2223 | } | |
2224 | ||
5812b1c2 FB |
2225 | dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, |
2226 | 512, &dwc->ep0_bounce_addr, GFP_KERNEL); | |
2227 | if (!dwc->ep0_bounce) { | |
2228 | dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); | |
2229 | ret = -ENOMEM; | |
2230 | goto err3; | |
2231 | } | |
2232 | ||
72246da4 FB |
2233 | dev_set_name(&dwc->gadget.dev, "gadget"); |
2234 | ||
2235 | dwc->gadget.ops = &dwc3_gadget_ops; | |
d327ab5b | 2236 | dwc->gadget.max_speed = USB_SPEED_SUPER; |
72246da4 FB |
2237 | dwc->gadget.speed = USB_SPEED_UNKNOWN; |
2238 | dwc->gadget.dev.parent = dwc->dev; | |
eeb720fb | 2239 | dwc->gadget.sg_supported = true; |
72246da4 FB |
2240 | |
2241 | dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask); | |
2242 | ||
2243 | dwc->gadget.dev.dma_parms = dwc->dev->dma_parms; | |
2244 | dwc->gadget.dev.dma_mask = dwc->dev->dma_mask; | |
2245 | dwc->gadget.dev.release = dwc3_gadget_release; | |
2246 | dwc->gadget.name = "dwc3-gadget"; | |
2247 | ||
2248 | /* | |
2249 | * REVISIT: Here we should clear all pending IRQs to be | |
2250 | * sure we're starting from a well known location. | |
2251 | */ | |
2252 | ||
2253 | ret = dwc3_gadget_init_endpoints(dwc); | |
2254 | if (ret) | |
5812b1c2 | 2255 | goto err4; |
72246da4 FB |
2256 | |
2257 | irq = platform_get_irq(to_platform_device(dwc->dev), 0); | |
2258 | ||
2259 | ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED, | |
2260 | "dwc3", dwc); | |
2261 | if (ret) { | |
2262 | dev_err(dwc->dev, "failed to request irq #%d --> %d\n", | |
2263 | irq, ret); | |
5812b1c2 | 2264 | goto err5; |
72246da4 FB |
2265 | } |
2266 | ||
2267 | /* Enable all but Start and End of Frame IRQs */ | |
2268 | reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | | |
2269 | DWC3_DEVTEN_EVNTOVERFLOWEN | | |
2270 | DWC3_DEVTEN_CMDCMPLTEN | | |
2271 | DWC3_DEVTEN_ERRTICERREN | | |
2272 | DWC3_DEVTEN_WKUPEVTEN | | |
2273 | DWC3_DEVTEN_ULSTCNGEN | | |
2274 | DWC3_DEVTEN_CONNECTDONEEN | | |
2275 | DWC3_DEVTEN_USBRSTEN | | |
2276 | DWC3_DEVTEN_DISCONNEVTEN); | |
2277 | dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); | |
2278 | ||
2279 | ret = device_register(&dwc->gadget.dev); | |
2280 | if (ret) { | |
2281 | dev_err(dwc->dev, "failed to register gadget device\n"); | |
2282 | put_device(&dwc->gadget.dev); | |
5812b1c2 | 2283 | goto err6; |
72246da4 FB |
2284 | } |
2285 | ||
2286 | ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); | |
2287 | if (ret) { | |
2288 | dev_err(dwc->dev, "failed to register udc\n"); | |
5812b1c2 | 2289 | goto err7; |
72246da4 FB |
2290 | } |
2291 | ||
2292 | return 0; | |
2293 | ||
5812b1c2 | 2294 | err7: |
72246da4 FB |
2295 | device_unregister(&dwc->gadget.dev); |
2296 | ||
5812b1c2 | 2297 | err6: |
72246da4 FB |
2298 | dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); |
2299 | free_irq(irq, dwc); | |
2300 | ||
5812b1c2 | 2301 | err5: |
72246da4 FB |
2302 | dwc3_gadget_free_endpoints(dwc); |
2303 | ||
5812b1c2 FB |
2304 | err4: |
2305 | dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce, | |
2306 | dwc->ep0_bounce_addr); | |
2307 | ||
72246da4 FB |
2308 | err3: |
2309 | dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2, | |
2310 | dwc->setup_buf, dwc->setup_buf_addr); | |
2311 | ||
2312 | err2: | |
2313 | dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), | |
2314 | dwc->ep0_trb, dwc->ep0_trb_addr); | |
2315 | ||
2316 | err1: | |
2317 | dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), | |
2318 | dwc->ctrl_req, dwc->ctrl_req_addr); | |
2319 | ||
2320 | err0: | |
2321 | return ret; | |
2322 | } | |
2323 | ||
2324 | void dwc3_gadget_exit(struct dwc3 *dwc) | |
2325 | { | |
2326 | int irq; | |
72246da4 FB |
2327 | |
2328 | usb_del_gadget_udc(&dwc->gadget); | |
2329 | irq = platform_get_irq(to_platform_device(dwc->dev), 0); | |
2330 | ||
2331 | dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); | |
2332 | free_irq(irq, dwc); | |
2333 | ||
72246da4 FB |
2334 | dwc3_gadget_free_endpoints(dwc); |
2335 | ||
5812b1c2 FB |
2336 | dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce, |
2337 | dwc->ep0_bounce_addr); | |
2338 | ||
72246da4 FB |
2339 | dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2, |
2340 | dwc->setup_buf, dwc->setup_buf_addr); | |
2341 | ||
2342 | dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), | |
2343 | dwc->ep0_trb, dwc->ep0_trb_addr); | |
2344 | ||
2345 | dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), | |
2346 | dwc->ctrl_req, dwc->ctrl_req_addr); | |
2347 | ||
2348 | device_unregister(&dwc->gadget.dev); | |
2349 | } |
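/*
 * Illustrative sketch only, not part of this file: how a caller is
 * expected to pair the two entry points defined above. The real call
 * sites live in the core glue (probe/remove paths); the functions
 * below are hypothetical and exist only to show the pairing.
 */
static int example_bind_gadget(struct dwc3 *dwc)
{
	/* called once registers, DMA and event buffers are set up */
	return dwc3_gadget_init(dwc);
}

static void example_unbind_gadget(struct dwc3 *dwc)
{
	/* undoes dwc3_gadget_init() on the remove path */
	dwc3_gadget_exit(dwc);
}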