1 | /** |
2 | * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link | |
3 | * | |
4 | * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com | |
5 | * All rights reserved. | |
6 | * | |
7 | * Authors: Felipe Balbi <[email protected]>, | |
8 | * Sebastian Andrzej Siewior <[email protected]> | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions, and the following disclaimer, | |
15 | * without modification. | |
16 | * 2. Redistributions in binary form must reproduce the above copyright | |
17 | * notice, this list of conditions and the following disclaimer in the | |
18 | * documentation and/or other materials provided with the distribution. | |
19 | * 3. The names of the above-listed copyright holders may not be used | |
20 | * to endorse or promote products derived from this software without | |
21 | * specific prior written permission. | |
22 | * | |
23 | * ALTERNATIVELY, this software may be distributed under the terms of the | |
24 | * GNU General Public License ("GPL") version 2, as published by the Free | |
25 | * Software Foundation. | |
26 | * | |
27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS | |
28 | * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, | |
29 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
30 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR | |
31 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
32 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
33 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
34 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | |
35 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | |
36 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |
37 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
38 | */ | |
39 | ||
40 | #include <linux/kernel.h> | |
41 | #include <linux/delay.h> | |
42 | #include <linux/slab.h> | |
43 | #include <linux/spinlock.h> | |
44 | #include <linux/platform_device.h> | |
45 | #include <linux/pm_runtime.h> | |
46 | #include <linux/interrupt.h> | |
47 | #include <linux/io.h> | |
48 | #include <linux/list.h> | |
49 | #include <linux/dma-mapping.h> | |
50 | ||
51 | #include <linux/usb/ch9.h> | |
52 | #include <linux/usb/gadget.h> | |
53 | ||
54 | #include "core.h" | |
55 | #include "gadget.h" | |
56 | #include "io.h" | |
57 | ||
58 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) | |
59 | ||
60 | void dwc3_map_buffer_to_dma(struct dwc3_request *req) | |
61 | { | |
62 | struct dwc3 *dwc = req->dep->dwc; | |
63 | ||
64 | if (req->request.dma == DMA_ADDR_INVALID) { | |
65 | req->request.dma = dma_map_single(dwc->dev, req->request.buf, | |
66 | req->request.length, req->direction | |
67 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | |
68 | req->mapped = true; | |
69 | } else { | |
70 | dma_sync_single_for_device(dwc->dev, req->request.dma, | |
71 | req->request.length, req->direction | |
72 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | |
73 | req->mapped = false; | |
74 | } | |
75 | } | |
76 | ||
77 | void dwc3_unmap_buffer_from_dma(struct dwc3_request *req) | |
78 | { | |
79 | struct dwc3 *dwc = req->dep->dwc; | |
80 | ||
81 | if (req->mapped) { | |
82 | dma_unmap_single(dwc->dev, req->request.dma, | |
83 | req->request.length, req->direction | |
84 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | |
85 | req->mapped = 0; | |
86 | req->request.dma = DMA_ADDR_INVALID; |
87 | } else { |
88 | dma_sync_single_for_cpu(dwc->dev, req->request.dma, | |
89 | req->request.length, req->direction | |
90 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | |
91 | } | |
92 | } | |
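/*
 * Mapping note: request.dma == DMA_ADDR_INVALID means the gadget
 * driver did not map the buffer itself, so we map it here and set
 * req->mapped so that dwc3_unmap_buffer_from_dma() knows to unmap
 * it again on giveback. Otherwise the buffer is already mapped and
 * we only synchronize it for device or CPU access.
 */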
93 | ||
94 | void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, | |
95 | int status) | |
96 | { | |
97 | struct dwc3 *dwc = dep->dwc; | |
98 | ||
99 | if (req->queued) { | |
100 | dep->busy_slot++; | |
101 | /* | |
102 | * Skip LINK TRB. We can't use req->trb and check for | |
103 | * DWC3_TRBCTL_LINK_TRB because it points to the TRB we just |
104 | * completed (not the LINK TRB). | |
105 | */ | |
106 | if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && | |
107 | usb_endpoint_xfer_isoc(dep->desc)) | |
108 | dep->busy_slot++; | |
109 | } | |
110 | list_del(&req->list); | |
111 | ||
112 | if (req->request.status == -EINPROGRESS) | |
113 | req->request.status = status; | |
114 | ||
115 | dwc3_unmap_buffer_from_dma(req); | |
116 | ||
117 | dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", | |
118 | req, dep->name, req->request.actual, | |
119 | req->request.length, status); | |
120 | ||
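/*
 * Drop the lock around the completion callback: the gadget driver
 * may queue a new request from inside its ->complete() handler,
 * which would otherwise deadlock on dwc->lock.
 */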
121 | spin_unlock(&dwc->lock); | |
122 | req->request.complete(&req->dep->endpoint, &req->request); | |
123 | spin_lock(&dwc->lock); | |
124 | } | |
125 | ||
126 | static const char *dwc3_gadget_ep_cmd_string(u8 cmd) | |
127 | { | |
128 | switch (cmd) { | |
129 | case DWC3_DEPCMD_DEPSTARTCFG: | |
130 | return "Start New Configuration"; | |
131 | case DWC3_DEPCMD_ENDTRANSFER: | |
132 | return "End Transfer"; | |
133 | case DWC3_DEPCMD_UPDATETRANSFER: | |
134 | return "Update Transfer"; | |
135 | case DWC3_DEPCMD_STARTTRANSFER: | |
136 | return "Start Transfer"; | |
137 | case DWC3_DEPCMD_CLEARSTALL: | |
138 | return "Clear Stall"; | |
139 | case DWC3_DEPCMD_SETSTALL: | |
140 | return "Set Stall"; | |
141 | case DWC3_DEPCMD_GETSEQNUMBER: | |
142 | return "Get Data Sequence Number"; | |
143 | case DWC3_DEPCMD_SETTRANSFRESOURCE: | |
144 | return "Set Endpoint Transfer Resource"; | |
145 | case DWC3_DEPCMD_SETEPCONFIG: | |
146 | return "Set Endpoint Configuration"; | |
147 | default: | |
148 | return "UNKNOWN command"; | |
149 | } | |
150 | } | |
151 | ||
152 | int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, | |
153 | unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) | |
154 | { | |
155 | struct dwc3_ep *dep = dwc->eps[ep]; | |
156 | unsigned long timeout = 500; | |
157 | u32 reg; | |
158 | ||
159 | dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n", | |
160 | dep->name, | |
161 | dwc3_gadget_ep_cmd_string(cmd), params->param0.raw, | |
162 | params->param1.raw, params->param2.raw); | |
163 | ||
164 | dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0.raw); | |
165 | dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1.raw); | |
166 | dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2.raw); | |
167 | ||
168 | dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT); | |
169 | do { | |
170 | reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep)); | |
171 | if (!(reg & DWC3_DEPCMD_CMDACT)) { | |
172 | dev_vdbg(dwc->dev, "Command Complete --> %d\n", |
173 | DWC3_DEPCMD_STATUS(reg)); | |
174 | return 0; |
175 | } | |
176 | ||
177 | /* | |
178 | * XXX Figure out a sane timeout here. 500ms is way too much. | |
179 | * We can't sleep here, because it is also called from | |
180 | * interrupt context. | |
181 | */ | |
182 | timeout--; | |
183 | if (!timeout) | |
184 | return -ETIMEDOUT; | |
185 | ||
186 | mdelay(1); | |
187 | } while (1); | |
188 | } | |
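/*
 * A minimal usage sketch (illustrative): commands such as Set Stall
 * need nothing beyond a zeroed parameter block, e.g.:
 *
 *	struct dwc3_gadget_ep_cmd_params params;
 *
 *	memset(&params, 0x00, sizeof(params));
 *	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
 *			DWC3_DEPCMD_SETSTALL, &params);
 */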
189 | ||
190 | static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, | |
191 | struct dwc3_trb_hw *trb) | |
192 | { | |
193 | u32 offset = trb - dep->trb_pool; | |
194 | ||
195 | return dep->trb_pool_dma + offset; | |
196 | } | |
197 | ||
198 | static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) | |
199 | { | |
200 | struct dwc3 *dwc = dep->dwc; | |
201 | ||
202 | if (dep->trb_pool) | |
203 | return 0; | |
204 | ||
205 | if (dep->number == 0 || dep->number == 1) | |
206 | return 0; | |
207 | ||
208 | dep->trb_pool = dma_alloc_coherent(dwc->dev, | |
209 | sizeof(struct dwc3_trb) * DWC3_TRB_NUM, | |
210 | &dep->trb_pool_dma, GFP_KERNEL); | |
211 | if (!dep->trb_pool) { | |
212 | dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n", | |
213 | dep->name); | |
214 | return -ENOMEM; | |
215 | } | |
216 | ||
217 | return 0; | |
218 | } | |
219 | ||
220 | static void dwc3_free_trb_pool(struct dwc3_ep *dep) | |
221 | { | |
222 | struct dwc3 *dwc = dep->dwc; | |
223 | ||
224 | dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, | |
225 | dep->trb_pool, dep->trb_pool_dma); | |
226 | ||
227 | dep->trb_pool = NULL; | |
228 | dep->trb_pool_dma = 0; | |
229 | } | |
230 | ||
231 | static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) | |
232 | { | |
233 | struct dwc3_gadget_ep_cmd_params params; | |
234 | u32 cmd; | |
235 | ||
236 | memset(¶ms, 0x00, sizeof(params)); | |
237 | ||
238 | if (dep->number != 1) { | |
239 | cmd = DWC3_DEPCMD_DEPSTARTCFG; | |
240 | /* XferRscIdx == 0 for ep0 and 2 for the remaining */ | |
241 | if (dep->number > 1) | |
242 | cmd |= DWC3_DEPCMD_PARAM(2); | |
243 | ||
244 | return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); | |
245 | } | |
246 | ||
247 | return 0; | |
248 | } | |
249 | ||
250 | static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, | |
251 | const struct usb_endpoint_descriptor *desc) | |
252 | { | |
253 | struct dwc3_gadget_ep_cmd_params params; | |
254 | ||
255 | memset(¶ms, 0x00, sizeof(params)); | |
256 | ||
257 | params.param0.depcfg.ep_type = usb_endpoint_type(desc); | |
258 | params.param0.depcfg.max_packet_size = usb_endpoint_maxp(desc); |
259 | |
260 | params.param1.depcfg.xfer_complete_enable = true; | |
261 | params.param1.depcfg.xfer_not_ready_enable = true; | |
262 | ||
263 | if (usb_endpoint_xfer_isoc(desc)) | |
264 | params.param1.depcfg.xfer_in_progress_enable = true; | |
265 | ||
266 | /* | |
267 | * We are doing 1:1 mapping for endpoints, meaning | |
268 | * Physical Endpoint 2 maps to Logical Endpoint 2 and |
269 | * so on. We consider the direction bit as part of the physical | |
270 | * endpoint number. So USB endpoint 0x81 is 0x03. | |
271 | */ | |
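/* e.g. USB ep 0x81 (EP1 IN) is physical endpoint 3; 0x02 (EP2 OUT) is 4 */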
272 | params.param1.depcfg.ep_number = dep->number; | |
273 | ||
274 | /* | |
275 | * We must use the lower 16 TX FIFOs even though | |
276 | * HW might have more | |
277 | */ | |
278 | if (dep->direction) | |
279 | params.param0.depcfg.fifo_number = dep->number >> 1; | |
280 | ||
281 | if (desc->bInterval) { | |
282 | params.param1.depcfg.binterval_m1 = desc->bInterval - 1; | |
283 | dep->interval = 1 << (desc->bInterval - 1); | |
284 | } | |
285 | ||
286 | return dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
287 | DWC3_DEPCMD_SETEPCONFIG, ¶ms); | |
288 | } | |
289 | ||
290 | static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) | |
291 | { | |
292 | struct dwc3_gadget_ep_cmd_params params; | |
293 | ||
294 | memset(¶ms, 0x00, sizeof(params)); | |
295 | ||
296 | params.param0.depxfercfg.number_xfer_resources = 1; | |
297 | ||
298 | return dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
299 | DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); | |
300 | } | |
301 | ||
302 | /** | |
303 | * __dwc3_gadget_ep_enable - Initializes a HW endpoint | |
304 | * @dep: endpoint to be initialized | |
305 | * @desc: USB Endpoint Descriptor | |
306 | * | |
307 | * Caller should take care of locking | |
308 | */ | |
309 | static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, | |
310 | const struct usb_endpoint_descriptor *desc) | |
311 | { | |
312 | struct dwc3 *dwc = dep->dwc; | |
313 | u32 reg; | |
314 | int ret = -ENOMEM; | |
315 | ||
316 | if (!(dep->flags & DWC3_EP_ENABLED)) { | |
317 | ret = dwc3_gadget_start_config(dwc, dep); | |
318 | if (ret) | |
319 | return ret; | |
320 | } | |
321 | ||
322 | ret = dwc3_gadget_set_ep_config(dwc, dep, desc); | |
323 | if (ret) | |
324 | return ret; | |
325 | ||
326 | if (!(dep->flags & DWC3_EP_ENABLED)) { | |
327 | struct dwc3_trb_hw *trb_st_hw; | |
328 | struct dwc3_trb_hw *trb_link_hw; | |
329 | struct dwc3_trb trb_link; | |
330 | ||
331 | ret = dwc3_gadget_set_xfer_resource(dwc, dep); | |
332 | if (ret) | |
333 | return ret; | |
334 | ||
335 | dep->desc = desc; | |
336 | dep->type = usb_endpoint_type(desc); | |
337 | dep->flags |= DWC3_EP_ENABLED; | |
338 | ||
339 | reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); | |
340 | reg |= DWC3_DALEPENA_EP(dep->number); | |
341 | dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); | |
342 | ||
343 | if (!usb_endpoint_xfer_isoc(desc)) | |
344 | return 0; | |
345 | ||
346 | memset(&trb_link, 0, sizeof(trb_link)); | |
347 | ||
348 | /* Link TRB for ISOC. The HWO bit is never reset */ |
349 | trb_st_hw = &dep->trb_pool[0]; | |
350 | ||
351 | trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw); | |
352 | trb_link.trbctl = DWC3_TRBCTL_LINK_TRB; | |
353 | trb_link.hwo = true; | |
354 | ||
355 | trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1]; | |
356 | dwc3_trb_to_hw(&trb_link, trb_link_hw); | |
357 | } | |
358 | ||
359 | return 0; | |
360 | } | |
361 | ||
362 | static void dwc3_gadget_nuke_reqs(struct dwc3_ep *dep, const int status) | |
363 | { | |
364 | struct dwc3_request *req; | |
365 | ||
366 | while (!list_empty(&dep->request_list)) { | |
367 | req = next_request(&dep->request_list); | |
368 | ||
369 | dwc3_gadget_giveback(dep, req, status); | |
370 | } | |
371 | /* nuke queued TRBs as well on command complete */ | |
372 | dep->flags |= DWC3_EP_WILL_SHUTDOWN; | |
373 | } | |
374 | ||
375 | /** | |
376 | * __dwc3_gadget_ep_disable - Disables a HW endpoint | |
377 | * @dep: the endpoint to disable | |
378 | * | |
379 | * Caller should take care of locking | |
380 | */ | |
381 | static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum); | |
382 | static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) | |
383 | { | |
384 | struct dwc3 *dwc = dep->dwc; | |
385 | u32 reg; | |
386 | ||
387 | dep->flags &= ~DWC3_EP_ENABLED; | |
388 | dwc3_stop_active_transfer(dwc, dep->number); | |
389 | dwc3_gadget_nuke_reqs(dep, -ESHUTDOWN); | |
390 | ||
391 | reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); | |
392 | reg &= ~DWC3_DALEPENA_EP(dep->number); | |
393 | dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); | |
394 | ||
395 | dep->desc = NULL; | |
396 | dep->type = 0; | |
397 | ||
398 | return 0; | |
399 | } | |
400 | ||
401 | /* -------------------------------------------------------------------------- */ | |
402 | ||
403 | static int dwc3_gadget_ep0_enable(struct usb_ep *ep, | |
404 | const struct usb_endpoint_descriptor *desc) | |
405 | { | |
406 | return -EINVAL; | |
407 | } | |
408 | ||
409 | static int dwc3_gadget_ep0_disable(struct usb_ep *ep) | |
410 | { | |
411 | return -EINVAL; | |
412 | } | |
413 | ||
414 | /* -------------------------------------------------------------------------- */ | |
415 | ||
416 | static int dwc3_gadget_ep_enable(struct usb_ep *ep, | |
417 | const struct usb_endpoint_descriptor *desc) | |
418 | { | |
419 | struct dwc3_ep *dep; | |
420 | struct dwc3 *dwc; | |
421 | unsigned long flags; | |
422 | int ret; | |
423 | ||
424 | if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { | |
425 | pr_debug("dwc3: invalid parameters\n"); | |
426 | return -EINVAL; | |
427 | } | |
428 | ||
429 | if (!desc->wMaxPacketSize) { | |
430 | pr_debug("dwc3: missing wMaxPacketSize\n"); | |
431 | return -EINVAL; | |
432 | } | |
433 | ||
434 | dep = to_dwc3_ep(ep); | |
435 | dwc = dep->dwc; | |
436 | ||
437 | switch (usb_endpoint_type(desc)) { | |
438 | case USB_ENDPOINT_XFER_CONTROL: | |
439 | strncat(dep->name, "-control", sizeof(dep->name)); | |
440 | break; | |
441 | case USB_ENDPOINT_XFER_ISOC: | |
442 | strncat(dep->name, "-isoc", sizeof(dep->name)); | |
443 | break; | |
444 | case USB_ENDPOINT_XFER_BULK: | |
445 | strncat(dep->name, "-bulk", sizeof(dep->name)); | |
446 | break; | |
447 | case USB_ENDPOINT_XFER_INT: | |
448 | strncat(dep->name, "-int", sizeof(dep->name)); | |
449 | break; | |
450 | default: | |
451 | dev_err(dwc->dev, "invalid endpoint transfer type\n"); | |
452 | } | |
453 | ||
454 | if (dep->flags & DWC3_EP_ENABLED) { | |
455 | dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n", | |
456 | dep->name); | |
457 | return 0; | |
458 | } | |
459 | ||
460 | dev_vdbg(dwc->dev, "Enabling %s\n", dep->name); | |
461 | ||
462 | spin_lock_irqsave(&dwc->lock, flags); | |
463 | ret = __dwc3_gadget_ep_enable(dep, desc); | |
464 | spin_unlock_irqrestore(&dwc->lock, flags); | |
465 | ||
466 | return ret; | |
467 | } | |
468 | ||
469 | static int dwc3_gadget_ep_disable(struct usb_ep *ep) | |
470 | { | |
471 | struct dwc3_ep *dep; | |
472 | struct dwc3 *dwc; | |
473 | unsigned long flags; | |
474 | int ret; | |
475 | ||
476 | if (!ep) { | |
477 | pr_debug("dwc3: invalid parameters\n"); | |
478 | return -EINVAL; | |
479 | } | |
480 | ||
481 | dep = to_dwc3_ep(ep); | |
482 | dwc = dep->dwc; | |
483 | ||
484 | if (!(dep->flags & DWC3_EP_ENABLED)) { | |
485 | dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n", | |
486 | dep->name); | |
487 | return 0; | |
488 | } | |
489 | ||
490 | snprintf(dep->name, sizeof(dep->name), "ep%d%s", | |
491 | dep->number >> 1, | |
492 | (dep->number & 1) ? "in" : "out"); | |
493 | ||
494 | spin_lock_irqsave(&dwc->lock, flags); | |
495 | ret = __dwc3_gadget_ep_disable(dep); | |
496 | spin_unlock_irqrestore(&dwc->lock, flags); | |
497 | ||
498 | return ret; | |
499 | } | |
500 | ||
501 | static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep, | |
502 | gfp_t gfp_flags) | |
503 | { | |
504 | struct dwc3_request *req; | |
505 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
506 | struct dwc3 *dwc = dep->dwc; | |
507 | ||
508 | req = kzalloc(sizeof(*req), gfp_flags); | |
509 | if (!req) { | |
510 | dev_err(dwc->dev, "not enough memory\n"); | |
511 | return NULL; | |
512 | } | |
513 | ||
514 | req->epnum = dep->number; | |
515 | req->dep = dep; | |
516 | req->request.dma = DMA_ADDR_INVALID; | |
517 | ||
518 | return &req->request; | |
519 | } | |
520 | ||
521 | static void dwc3_gadget_ep_free_request(struct usb_ep *ep, | |
522 | struct usb_request *request) | |
523 | { | |
524 | struct dwc3_request *req = to_dwc3_request(request); | |
525 | ||
526 | kfree(req); | |
527 | } | |
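/*
 * A minimal usage sketch from a gadget function driver's point of view
 * (illustrative; my_complete() and the buffer are hypothetical, error
 * handling omitted):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *	...
 *	usb_ep_free_request(ep, req);
 */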
528 | ||
529 | /* | |
530 | * dwc3_prepare_trbs - setup TRBs from requests | |
531 | * @dep: endpoint for which requests are being prepared | |
532 | * @starting: true if the endpoint is idle and no requests are queued. | |
533 | * | |
534 | * The function goes through the request list and sets up TRBs for the |
535 | * transfers. The function returns once there are no more TRBs available or |
536 | * it runs out of requests. |
537 | */ | |
538 | static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep, | |
539 | bool starting) | |
540 | { | |
541 | struct dwc3_request *req, *n, *ret = NULL; | |
542 | struct dwc3_trb_hw *trb_hw; | |
543 | struct dwc3_trb trb; | |
544 | u32 trbs_left; | |
545 | ||
546 | BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); | |
547 | ||
548 | /* the first request must not be queued */ | |
549 | trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK; | |
550 | /* | |
551 | * If busy and free slots are equal, the ring is either full or empty. If |
552 | * we are starting to process requests then it is empty. Otherwise it is |
553 | * full and we don't do anything. |
554 | */ | |
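/*
 * Worked example (assuming DWC3_TRB_NUM == 32): with busy_slot == 5 and
 * free_slot == 9, four TRBs are still owned by the controller, so
 * trbs_left == (5 - 9) & 31 == 28 free slots.
 */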
555 | if (!trbs_left) { | |
556 | if (!starting) | |
557 | return NULL; | |
558 | trbs_left = DWC3_TRB_NUM; | |
559 | /* | |
560 | * In case we start from scratch, we queue the ISOC requests | |
561 | * starting from slot 1. This is done because we use a ring |
562 | * buffer and have no LST bit to stop us. Instead, we place |
563 | * an IOC bit every TRB_NUM/4 TRBs. We try to avoid having an |
564 | * interrupt after the first request, so we start at slot 1 |
565 | * and have 7 requests processed before we hit the first IOC. |
566 | * Other transfer types don't use the ring buffer and are | |
567 | * processed from the first TRB until the last one. Since we | |
568 | * don't wrap around we have to start at the beginning. | |
569 | */ | |
570 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
571 | dep->busy_slot = 1; | |
572 | dep->free_slot = 1; | |
573 | } else { | |
574 | dep->busy_slot = 0; | |
575 | dep->free_slot = 0; | |
576 | } | |
577 | } | |
578 | ||
579 | /* The last TRB is a link TRB, not used for xfer */ | |
580 | if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc)) | |
581 | return NULL; | |
582 | ||
583 | list_for_each_entry_safe(req, n, &dep->request_list, list) { | |
584 | unsigned int last_one = 0; | |
585 | unsigned int cur_slot; | |
586 | ||
587 | trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; | |
588 | cur_slot = dep->free_slot; | |
589 | dep->free_slot++; | |
590 | ||
591 | /* Skip the LINK-TRB on ISOC */ | |
592 | if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && | |
593 | usb_endpoint_xfer_isoc(dep->desc)) | |
594 | continue; | |
595 | ||
596 | dwc3_gadget_move_request_queued(req); | |
597 | memset(&trb, 0, sizeof(trb)); | |
598 | trbs_left--; | |
599 | ||
600 | /* Is our TRB pool empty? */ | |
601 | if (!trbs_left) | |
602 | last_one = 1; | |
603 | /* Is this the last request? */ | |
604 | if (list_empty(&dep->request_list)) | |
605 | last_one = 1; | |
606 | ||
607 | /* | |
608 | * FIXME we shouldn't always need to set the LST bit, but we are |
609 | * facing a weird problem with the hardware where a transfer doesn't |
610 | * complete even though it has been previously started. |
611 | * | |
612 | * While we're debugging the problem, as a workaround to | |
613 | * multiple TRBs handling, use only one TRB at a time. | |
614 | */ | |
615 | last_one = 1; | |
616 | ||
617 | req->trb = trb_hw; | |
618 | if (!ret) | |
619 | ret = req; | |
620 | ||
621 | trb.bplh = req->request.dma; | |
622 | ||
623 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
624 | trb.isp_imi = true; | |
625 | trb.csp = true; | |
626 | } else { | |
627 | trb.lst = last_one; | |
628 | } | |
629 | ||
630 | switch (usb_endpoint_type(dep->desc)) { | |
631 | case USB_ENDPOINT_XFER_CONTROL: | |
632 | trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP; | |
633 | break; | |
634 | ||
635 | case USB_ENDPOINT_XFER_ISOC: | |
636 | trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; |
637 | |
638 | /* IOC every DWC3_TRB_NUM / 4 so we can refill */ | |
639 | if (!(cur_slot % (DWC3_TRB_NUM / 4))) | |
640 | trb.ioc = last_one; | |
641 | break; | |
642 | ||
643 | case USB_ENDPOINT_XFER_BULK: | |
644 | case USB_ENDPOINT_XFER_INT: | |
645 | trb.trbctl = DWC3_TRBCTL_NORMAL; | |
646 | break; | |
647 | default: | |
648 | /* | |
649 | * This is only possible with faulty memory because we | |
650 | * checked it already :) | |
651 | */ | |
652 | BUG(); | |
653 | } | |
654 | ||
655 | trb.length = req->request.length; | |
656 | trb.hwo = true; | |
657 | ||
658 | dwc3_trb_to_hw(&trb, trb_hw); | |
659 | req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw); | |
660 | ||
661 | if (last_one) | |
662 | break; | |
663 | } | |
664 | ||
665 | return ret; | |
666 | } | |
667 | ||
668 | static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param, | |
669 | int start_new) | |
670 | { | |
671 | struct dwc3_gadget_ep_cmd_params params; | |
672 | struct dwc3_request *req; | |
673 | struct dwc3 *dwc = dep->dwc; | |
674 | int ret; | |
675 | u32 cmd; | |
676 | ||
677 | if (start_new && (dep->flags & DWC3_EP_BUSY)) { | |
678 | dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name); | |
679 | return -EBUSY; | |
680 | } | |
681 | dep->flags &= ~DWC3_EP_PENDING_REQUEST; | |
682 | ||
683 | /* | |
684 | * If we are getting here after a short-out-packet we don't enqueue any | |
685 | * new requests as we try to set the IOC bit only on the last request. | |
686 | */ | |
687 | if (start_new) { | |
688 | if (list_empty(&dep->req_queued)) | |
689 | dwc3_prepare_trbs(dep, start_new); | |
690 | ||
691 | /* req points to the first request which will be sent */ | |
692 | req = next_request(&dep->req_queued); | |
693 | } else { | |
694 | /* | |
695 | * req points to the first request where HWO changed | |
696 | * from 0 to 1 | |
697 | */ | |
698 | req = dwc3_prepare_trbs(dep, start_new); | |
699 | } | |
700 | if (!req) { | |
701 | dep->flags |= DWC3_EP_PENDING_REQUEST; | |
702 | return 0; | |
703 | } | |
704 | ||
705 | memset(¶ms, 0, sizeof(params)); | |
706 | params.param0.depstrtxfer.transfer_desc_addr_high = | |
707 | upper_32_bits(req->trb_dma); | |
708 | params.param1.depstrtxfer.transfer_desc_addr_low = | |
709 | lower_32_bits(req->trb_dma); | |
710 | ||
711 | if (start_new) | |
712 | cmd = DWC3_DEPCMD_STARTTRANSFER; | |
713 | else | |
714 | cmd = DWC3_DEPCMD_UPDATETRANSFER; | |
715 | ||
716 | cmd |= DWC3_DEPCMD_PARAM(cmd_param); | |
717 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); | |
718 | if (ret < 0) { | |
719 | dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n"); | |
720 | ||
721 | /* | |
722 | * FIXME we need to iterate over the list of requests | |
723 | * here and stop, unmap, free and del each of the linked | |
724 | * requests instead of what we do now. |
725 | */ | |
726 | dwc3_unmap_buffer_from_dma(req); | |
727 | list_del(&req->list); | |
728 | return ret; | |
729 | } | |
730 | ||
731 | dep->flags |= DWC3_EP_BUSY; | |
732 | dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc, | |
733 | dep->number); | |
734 | if (!dep->res_trans_idx) | |
735 | printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__); | |
736 | return 0; | |
737 | } | |
738 | ||
739 | static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) | |
740 | { | |
741 | req->request.actual = 0; | |
742 | req->request.status = -EINPROGRESS; | |
743 | req->direction = dep->direction; | |
744 | req->epnum = dep->number; | |
745 | ||
746 | /* | |
747 | * We only add to our list of requests now and | |
748 | * start consuming the list once we get XferNotReady | |
749 | * IRQ. | |
750 | * | |
751 | * That way, we avoid doing anything that we don't need | |
752 | * to do now and defer it until the point we receive a | |
753 | * particular token from the Host side. | |
754 | * | |
755 | * This will also avoid Host cancelling URBs due to too | |
756 | * many NAKs. |
757 | */ | |
758 | dwc3_map_buffer_to_dma(req); | |
759 | list_add_tail(&req->list, &dep->request_list); | |
760 | ||
761 | /* | |
762 | * There is one special case: XferNotReady with | |
763 | * empty list of requests. We need to kick the | |
764 | * transfer here in that situation, otherwise | |
765 | * we will be NAKing forever. | |
766 | * | |
767 | * If we get XferNotReady before gadget driver | |
768 | * has a chance to queue a request, we will ACK | |
769 | * the IRQ but won't be able to receive the data | |
770 | * until the next request is queued. The following | |
771 | * code is handling exactly that. | |
772 | */ | |
773 | if (dep->flags & DWC3_EP_PENDING_REQUEST) { | |
774 | int ret; | |
775 | int start_trans; | |
776 | ||
777 | start_trans = 1; | |
778 | if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && | |
779 | dep->flags & DWC3_EP_BUSY) | |
780 | start_trans = 0; | |
781 | ||
782 | ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans); | |
783 | if (ret && ret != -EBUSY) { | |
784 | struct dwc3 *dwc = dep->dwc; | |
785 | ||
786 | dev_dbg(dwc->dev, "%s: failed to kick transfers\n", | |
787 | dep->name); | |
788 | } | |
789 | } |
790 | ||
791 | return 0; | |
792 | } | |
793 | ||
794 | static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, | |
795 | gfp_t gfp_flags) | |
796 | { | |
797 | struct dwc3_request *req = to_dwc3_request(request); | |
798 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
799 | struct dwc3 *dwc = dep->dwc; | |
800 | ||
801 | unsigned long flags; | |
802 | ||
803 | int ret; | |
804 | ||
805 | if (!dep->desc) { | |
806 | dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", | |
807 | request, ep->name); | |
808 | return -ESHUTDOWN; | |
809 | } | |
810 | ||
811 | dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n", |
812 | request, ep->name, request->length); | |
813 | ||
814 | spin_lock_irqsave(&dwc->lock, flags); | |
815 | ret = __dwc3_gadget_ep_queue(dep, req); | |
816 | spin_unlock_irqrestore(&dwc->lock, flags); | |
817 | ||
818 | return ret; | |
819 | } | |
820 | ||
821 | static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, | |
822 | struct usb_request *request) | |
823 | { | |
824 | struct dwc3_request *req = to_dwc3_request(request); | |
825 | struct dwc3_request *r = NULL; | |
826 | ||
827 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
828 | struct dwc3 *dwc = dep->dwc; | |
829 | ||
830 | unsigned long flags; | |
831 | int ret = 0; | |
832 | ||
833 | spin_lock_irqsave(&dwc->lock, flags); | |
834 | ||
835 | list_for_each_entry(r, &dep->request_list, list) { | |
836 | if (r == req) | |
837 | break; | |
838 | } | |
839 | ||
840 | if (r != req) { | |
841 | list_for_each_entry(r, &dep->req_queued, list) { | |
842 | if (r == req) | |
843 | break; | |
844 | } | |
845 | if (r == req) { | |
846 | /* wait until it is processed */ | |
847 | dwc3_stop_active_transfer(dwc, dep->number); | |
848 | goto out0; | |
849 | } | |
850 | dev_err(dwc->dev, "request %p was not queued to %s\n", | |
851 | request, ep->name); | |
852 | ret = -EINVAL; | |
853 | goto out0; | |
854 | } | |
855 | ||
856 | /* giveback the request */ | |
857 | dwc3_gadget_giveback(dep, req, -ECONNRESET); | |
858 | ||
859 | out0: | |
860 | spin_unlock_irqrestore(&dwc->lock, flags); | |
861 | ||
862 | return ret; | |
863 | } | |
864 | ||
865 | int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) | |
866 | { | |
867 | struct dwc3_gadget_ep_cmd_params params; | |
868 | struct dwc3 *dwc = dep->dwc; | |
869 | int ret; | |
870 | ||
871 | memset(¶ms, 0x00, sizeof(params)); | |
872 | ||
873 | if (value) { | |
874 | if (dep->number == 0 || dep->number == 1) | |
875 | dwc->ep0state = EP0_STALL; | |
876 | ||
877 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
878 | DWC3_DEPCMD_SETSTALL, ¶ms); | |
879 | if (ret) | |
880 | dev_err(dwc->dev, "failed to %s STALL on %s\n", | |
881 | value ? "set" : "clear", | |
882 | dep->name); | |
883 | else | |
884 | dep->flags |= DWC3_EP_STALL; | |
885 | } else { | |
886 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
887 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | |
888 | if (ret) | |
889 | dev_err(dwc->dev, "failed to %s STALL on %s\n", | |
890 | value ? "set" : "clear", | |
891 | dep->name); | |
892 | else | |
893 | dep->flags &= ~DWC3_EP_STALL; | |
894 | } | |
895 | return ret; | |
896 | } | |
897 | ||
898 | static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) | |
899 | { | |
900 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
901 | struct dwc3 *dwc = dep->dwc; | |
902 | ||
903 | unsigned long flags; | |
904 | ||
905 | int ret; | |
906 | ||
907 | spin_lock_irqsave(&dwc->lock, flags); | |
908 | ||
909 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
910 | dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); | |
911 | ret = -EINVAL; | |
912 | goto out; | |
913 | } | |
914 | ||
915 | ret = __dwc3_gadget_ep_set_halt(dep, value); | |
916 | out: | |
917 | spin_unlock_irqrestore(&dwc->lock, flags); | |
918 | ||
919 | return ret; | |
920 | } | |
921 | ||
922 | static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) | |
923 | { | |
924 | struct dwc3_ep *dep = to_dwc3_ep(ep); | |
925 | ||
926 | dep->flags |= DWC3_EP_WEDGE; | |
927 | ||
928 | return usb_ep_set_halt(ep); | |
929 | } | |
930 | ||
931 | /* -------------------------------------------------------------------------- */ | |
932 | ||
933 | static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = { | |
934 | .bLength = USB_DT_ENDPOINT_SIZE, | |
935 | .bDescriptorType = USB_DT_ENDPOINT, | |
936 | .bmAttributes = USB_ENDPOINT_XFER_CONTROL, | |
937 | }; | |
938 | ||
939 | static const struct usb_ep_ops dwc3_gadget_ep0_ops = { | |
940 | .enable = dwc3_gadget_ep0_enable, | |
941 | .disable = dwc3_gadget_ep0_disable, | |
942 | .alloc_request = dwc3_gadget_ep_alloc_request, | |
943 | .free_request = dwc3_gadget_ep_free_request, | |
944 | .queue = dwc3_gadget_ep0_queue, | |
945 | .dequeue = dwc3_gadget_ep_dequeue, | |
946 | .set_halt = dwc3_gadget_ep_set_halt, | |
947 | .set_wedge = dwc3_gadget_ep_set_wedge, | |
948 | }; | |
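/*
 * Note: ep0 gets dedicated ops because control transfers are driven by
 * the ep0 state machine (dwc->ep0state) instead of the generic TRB ring
 * handling; enable/disable are rejected above and queuing goes through
 * dwc3_gadget_ep0_queue() (implemented in ep0.c).
 */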
949 | ||
950 | static const struct usb_ep_ops dwc3_gadget_ep_ops = { | |
951 | .enable = dwc3_gadget_ep_enable, | |
952 | .disable = dwc3_gadget_ep_disable, | |
953 | .alloc_request = dwc3_gadget_ep_alloc_request, | |
954 | .free_request = dwc3_gadget_ep_free_request, | |
955 | .queue = dwc3_gadget_ep_queue, | |
956 | .dequeue = dwc3_gadget_ep_dequeue, | |
957 | .set_halt = dwc3_gadget_ep_set_halt, | |
958 | .set_wedge = dwc3_gadget_ep_set_wedge, | |
959 | }; | |
960 | ||
961 | /* -------------------------------------------------------------------------- */ | |
962 | ||
963 | static int dwc3_gadget_get_frame(struct usb_gadget *g) | |
964 | { | |
965 | struct dwc3 *dwc = gadget_to_dwc(g); | |
966 | u32 reg; | |
967 | ||
968 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
969 | return DWC3_DSTS_SOFFN(reg); | |
970 | } | |
971 | ||
972 | static int dwc3_gadget_wakeup(struct usb_gadget *g) | |
973 | { | |
974 | struct dwc3 *dwc = gadget_to_dwc(g); | |
975 | ||
976 | unsigned long timeout; | |
977 | unsigned long flags; | |
978 | ||
979 | u32 reg; | |
980 | ||
981 | int ret = 0; | |
982 | ||
983 | u8 link_state; | |
984 | u8 speed; | |
985 | ||
986 | spin_lock_irqsave(&dwc->lock, flags); | |
987 | ||
988 | /* | |
989 | * According to the Databook, the Remote Wakeup request should |
990 | * be issued only when the device is in the early suspend state. |
991 | * | |
992 | * We can check that via USB Link State bits in DSTS register. | |
993 | */ | |
994 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
995 | ||
996 | speed = reg & DWC3_DSTS_CONNECTSPD; | |
997 | if (speed == DWC3_DSTS_SUPERSPEED) { | |
998 | dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n"); | |
999 | ret = -EINVAL; | |
1000 | goto out; | |
1001 | } | |
1002 | ||
1003 | link_state = DWC3_DSTS_USBLNKST(reg); | |
1004 | ||
1005 | switch (link_state) { | |
1006 | case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ | |
1007 | case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ | |
1008 | break; | |
1009 | default: | |
1010 | dev_dbg(dwc->dev, "can't wakeup from link state %d\n", | |
1011 | link_state); | |
1012 | ret = -EINVAL; | |
1013 | goto out; | |
1014 | } | |
1015 | ||
1016 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1017 | ||
1018 | /* | |
1019 | * Switch link state to Recovery. In HS/FS/LS this means | |
1020 | * RemoteWakeup Request | |
1021 | */ | |
1022 | reg |= DWC3_DCTL_ULSTCHNG_RECOVERY; | |
1023 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1024 | ||
1025 | /* wait for at least 2000us */ | |
1026 | usleep_range(2000, 2500); | |
1027 | ||
1028 | /* write zeroes to Link Change Request */ | |
1029 | reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK; | |
1030 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1031 | ||
1032 | /* poll until the Link State changes to ON */ |
1033 | timeout = jiffies + msecs_to_jiffies(100); | |
1034 | ||
1035 | while (!(time_after(jiffies, timeout))) { | |
1036 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
1037 | ||
1038 | /* in HS, means ON */ | |
1039 | if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) | |
1040 | break; | |
1041 | } | |
1042 | ||
1043 | if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { | |
1044 | dev_err(dwc->dev, "failed to send remote wakeup\n"); | |
1045 | ret = -EINVAL; | |
1046 | } | |
1047 | ||
1048 | out: | |
1049 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1050 | ||
1051 | return ret; | |
1052 | } | |
1053 | ||
1054 | static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, | |
1055 | int is_selfpowered) | |
1056 | { | |
1057 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1058 | ||
1059 | dwc->is_selfpowered = !!is_selfpowered; | |
1060 | ||
1061 | return 0; | |
1062 | } | |
1063 | ||
1064 | static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) | |
1065 | { | |
1066 | u32 reg; | |
1067 | unsigned long timeout = 500; | |
1068 | ||
1069 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1070 | if (is_on) | |
1071 | reg |= DWC3_DCTL_RUN_STOP; | |
1072 | else | |
1073 | reg &= ~DWC3_DCTL_RUN_STOP; | |
1074 | ||
1075 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1076 | ||
1077 | do { | |
1078 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
1079 | if (is_on) { | |
1080 | if (!(reg & DWC3_DSTS_DEVCTRLHLT)) | |
1081 | break; | |
1082 | } else { | |
1083 | if (reg & DWC3_DSTS_DEVCTRLHLT) | |
1084 | break; | |
1085 | } | |
1086 | /* | |
1087 | * XXX reduce the 500ms delay | |
1088 | */ | |
1089 | timeout--; | |
1090 | if (!timeout) | |
1091 | break; | |
1092 | mdelay(1); | |
1093 | } while (1); | |
1094 | ||
1095 | dev_vdbg(dwc->dev, "gadget %s data soft-%s\n", | |
1096 | dwc->gadget_driver | |
1097 | ? dwc->gadget_driver->function : "no-function", | |
1098 | is_on ? "connect" : "disconnect"); | |
1099 | } | |
1100 | ||
1101 | static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) | |
1102 | { | |
1103 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1104 | unsigned long flags; | |
1105 | ||
1106 | is_on = !!is_on; | |
1107 | ||
1108 | spin_lock_irqsave(&dwc->lock, flags); | |
1109 | dwc3_gadget_run_stop(dwc, is_on); | |
1110 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1111 | ||
1112 | return 0; | |
1113 | } | |
1114 | ||
1115 | static int dwc3_gadget_start(struct usb_gadget *g, | |
1116 | struct usb_gadget_driver *driver) | |
1117 | { | |
1118 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1119 | struct dwc3_ep *dep; | |
1120 | unsigned long flags; | |
1121 | int ret = 0; | |
1122 | u32 reg; | |
1123 | ||
1124 | spin_lock_irqsave(&dwc->lock, flags); | |
1125 | ||
1126 | if (dwc->gadget_driver) { | |
1127 | dev_err(dwc->dev, "%s is already bound to %s\n", | |
1128 | dwc->gadget.name, | |
1129 | dwc->gadget_driver->driver.name); | |
1130 | ret = -EBUSY; | |
1131 | goto err0; | |
1132 | } | |
1133 | ||
1134 | dwc->gadget_driver = driver; | |
1135 | dwc->gadget.dev.driver = &driver->driver; | |
1136 | ||
1137 | reg = dwc3_readl(dwc->regs, DWC3_GCTL); | |
1138 | ||
1139 | /* | |
1140 | * REVISIT: power down scale might be different | |
1141 | * depending on PHY used, need to pass that via platform_data | |
1142 | */ | |
1143 | reg |= DWC3_GCTL_PWRDNSCALE(0x61a) | |
1144 | | DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE); | |
1145 | reg &= ~DWC3_GCTL_DISSCRAMBLE; | |
1146 | ||
1147 | /* | |
1148 | * WORKAROUND: DWC3 revisions <1.90a have a bug | |
1149 | * where the device fails to connect at SuperSpeed |
1150 | * and falls back to high-speed mode which causes |
1151 | * the device to enter a Connect/Disconnect loop |
1152 | */ | |
1153 | if (dwc->revision < DWC3_REVISION_190A) | |
1154 | reg |= DWC3_GCTL_U2RSTECN; | |
1155 | ||
1156 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); | |
1157 | ||
1158 | reg = dwc3_readl(dwc->regs, DWC3_DCFG); | |
1159 | reg &= ~(DWC3_DCFG_SPEED_MASK); | |
1160 | reg |= DWC3_DCFG_SUPERSPEED; | |
1161 | dwc3_writel(dwc->regs, DWC3_DCFG, reg); | |
1162 | ||
1163 | /* Start with SuperSpeed Default */ | |
1164 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); | |
1165 | ||
1166 | dep = dwc->eps[0]; | |
1167 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc); | |
1168 | if (ret) { | |
1169 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1170 | goto err0; | |
1171 | } | |
1172 | ||
1173 | dep = dwc->eps[1]; | |
1174 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc); | |
1175 | if (ret) { | |
1176 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1177 | goto err1; | |
1178 | } | |
1179 | ||
1180 | /* begin to receive SETUP packets */ | |
1181 | dwc->ep0state = EP0_IDLE; | |
1182 | dwc3_ep0_out_start(dwc); | |
1183 | ||
1184 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1185 | ||
1186 | return 0; | |
1187 | ||
1188 | err1: | |
1189 | __dwc3_gadget_ep_disable(dwc->eps[0]); | |
1190 | ||
1191 | err0: | |
1192 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1193 | ||
1194 | return ret; | |
1195 | } | |
1196 | ||
1197 | static int dwc3_gadget_stop(struct usb_gadget *g, | |
1198 | struct usb_gadget_driver *driver) | |
1199 | { | |
1200 | struct dwc3 *dwc = gadget_to_dwc(g); | |
1201 | unsigned long flags; | |
1202 | ||
1203 | spin_lock_irqsave(&dwc->lock, flags); | |
1204 | ||
1205 | __dwc3_gadget_ep_disable(dwc->eps[0]); | |
1206 | __dwc3_gadget_ep_disable(dwc->eps[1]); | |
1207 | ||
1208 | dwc->gadget_driver = NULL; | |
1209 | dwc->gadget.dev.driver = NULL; | |
1210 | ||
1211 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1212 | ||
1213 | return 0; | |
1214 | } | |
1215 | static const struct usb_gadget_ops dwc3_gadget_ops = { | |
1216 | .get_frame = dwc3_gadget_get_frame, | |
1217 | .wakeup = dwc3_gadget_wakeup, | |
1218 | .set_selfpowered = dwc3_gadget_set_selfpowered, | |
1219 | .pullup = dwc3_gadget_pullup, | |
1220 | .udc_start = dwc3_gadget_start, | |
1221 | .udc_stop = dwc3_gadget_stop, | |
1222 | }; | |
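/*
 * These ops are invoked through the UDC core on behalf of the gadget
 * driver; for instance usb_gadget_connect() ends up in
 * dwc3_gadget_pullup() and usb_gadget_wakeup() in dwc3_gadget_wakeup().
 */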
1223 | ||
1224 | /* -------------------------------------------------------------------------- */ | |
1225 | ||
1226 | static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc) | |
1227 | { | |
1228 | struct dwc3_ep *dep; | |
1229 | u8 epnum; | |
1230 | ||
1231 | INIT_LIST_HEAD(&dwc->gadget.ep_list); | |
1232 | ||
1233 | for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1234 | dep = kzalloc(sizeof(*dep), GFP_KERNEL); | |
1235 | if (!dep) { | |
1236 | dev_err(dwc->dev, "can't allocate endpoint %d\n", | |
1237 | epnum); | |
1238 | return -ENOMEM; | |
1239 | } | |
1240 | ||
1241 | dep->dwc = dwc; | |
1242 | dep->number = epnum; | |
1243 | dwc->eps[epnum] = dep; | |
1244 | ||
1245 | snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1, | |
1246 | (epnum & 1) ? "in" : "out"); | |
1247 | dep->endpoint.name = dep->name; | |
1248 | dep->direction = (epnum & 1); | |
1249 | ||
1250 | if (epnum == 0 || epnum == 1) { | |
1251 | dep->endpoint.maxpacket = 512; | |
1252 | dep->endpoint.ops = &dwc3_gadget_ep0_ops; | |
1253 | if (!epnum) | |
1254 | dwc->gadget.ep0 = &dep->endpoint; | |
1255 | } else { | |
1256 | int ret; | |
1257 | ||
1258 | dep->endpoint.maxpacket = 1024; | |
1259 | dep->endpoint.ops = &dwc3_gadget_ep_ops; | |
1260 | list_add_tail(&dep->endpoint.ep_list, | |
1261 | &dwc->gadget.ep_list); | |
1262 | ||
1263 | ret = dwc3_alloc_trb_pool(dep); | |
1264 | if (ret) { | |
1265 | dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name); | |
1266 | return ret; | |
1267 | } | |
1268 | } | |
1269 | INIT_LIST_HEAD(&dep->request_list); | |
1270 | INIT_LIST_HEAD(&dep->req_queued); | |
1271 | } | |
1272 | ||
1273 | return 0; | |
1274 | } | |
1275 | ||
1276 | static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) | |
1277 | { | |
1278 | struct dwc3_ep *dep; | |
1279 | u8 epnum; | |
1280 | ||
1281 | for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1282 | dep = dwc->eps[epnum]; | |
1283 | dwc3_free_trb_pool(dep); | |
1284 | ||
1285 | if (epnum != 0 && epnum != 1) | |
1286 | list_del(&dep->endpoint.ep_list); | |
1287 | ||
1288 | kfree(dep); | |
1289 | } | |
1290 | } | |
1291 | ||
1292 | static void dwc3_gadget_release(struct device *dev) | |
1293 | { | |
1294 | dev_dbg(dev, "%s\n", __func__); | |
1295 | } | |
1296 | ||
1297 | /* -------------------------------------------------------------------------- */ | |
1298 | static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |
1299 | const struct dwc3_event_depevt *event, int status) | |
1300 | { | |
1301 | struct dwc3_request *req; | |
1302 | struct dwc3_trb trb; | |
1303 | unsigned int count; | |
1304 | unsigned int s_pkt = 0; | |
1305 | ||
1306 | do { | |
1307 | req = next_request(&dep->req_queued); | |
1308 | if (!req) | |
1309 | break; | |
1310 | ||
1311 | dwc3_trb_to_nat(req->trb, &trb); | |
1312 | ||
1313 | if (trb.hwo && status != -ESHUTDOWN) |
1314 | /* |
1315 | * We continue despite the error. There is not much we |
1316 | * can do. If we don't clean it up we loop forever. If we |
1317 | * skip the TRB then it gets overwritten and reused after |
1318 | * a while since we use them in a ring buffer. A BUG() |
1319 | * would help. Let's hope that if this occurs, someone |
1320 | * fixes the root cause instead of looking away :) |
1321 | */ |
1322 | dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", |
1323 | dep->name, req->trb); |
1324 | count = trb.length; |
1325 | ||
1326 | if (dep->direction) { | |
1327 | if (count) { | |
1328 | dev_err(dwc->dev, "incomplete IN transfer %s\n", | |
1329 | dep->name); | |
1330 | status = -ECONNRESET; | |
1331 | } | |
1332 | } else { | |
1333 | if (count && (event->status & DEPEVT_STATUS_SHORT)) | |
1334 | s_pkt = 1; | |
1335 | } | |
1336 | ||
1337 | /* | |
1338 | * We assume here we will always receive the entire data block | |
1339 | * which we should receive. Meaning, if we program RX to | |
1340 | * receive 4K but we receive only 2K, we assume that's all we | |
1341 | * should receive and we simply bounce the request back to the | |
1342 | * gadget driver for further processing. | |
1343 | */ | |
1344 | req->request.actual += req->request.length - count; | |
1345 | dwc3_gadget_giveback(dep, req, status); | |
1346 | if (s_pkt) | |
1347 | break; | |
1348 | if ((event->status & DEPEVT_STATUS_LST) && trb.lst) | |
1349 | break; | |
1350 | if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc) | |
1351 | break; | |
1352 | } while (1); | |
1353 | ||
1354 | if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc) | |
1355 | return 0; | |
1356 | return 1; | |
1357 | } | |
1358 | ||
1359 | static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, | |
1360 | struct dwc3_ep *dep, const struct dwc3_event_depevt *event, | |
1361 | int start_new) | |
1362 | { | |
1363 | unsigned status = 0; | |
1364 | int clean_busy; | |
1365 | ||
1366 | if (event->status & DEPEVT_STATUS_BUSERR) | |
1367 | status = -ECONNRESET; | |
1368 | ||
1369 | clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status); | |
1370 | if (clean_busy) { |
1371 | dep->flags &= ~DWC3_EP_BUSY; |
1372 | dep->res_trans_idx = 0; |
1373 | } |
1374 | } |
1375 | ||
1376 | static void dwc3_gadget_start_isoc(struct dwc3 *dwc, | |
1377 | struct dwc3_ep *dep, const struct dwc3_event_depevt *event) | |
1378 | { | |
1379 | u32 uf; | |
1380 | ||
1381 | if (list_empty(&dep->request_list)) { | |
1382 | dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n", |
1383 | dep->name); | |
1384 | return; | |
1385 | } | |
1386 | ||
1387 | if (event->parameters) { | |
1388 | u32 mask; | |
1389 | ||
1390 | mask = ~(dep->interval - 1); | |
1391 | uf = event->parameters & mask; | |
1392 | /* 4 micro frames in the future */ | |
1393 | uf += dep->interval * 4; | |
1394 | } else { | |
1395 | uf = 0; | |
1396 | } | |
1397 | ||
1398 | __dwc3_gadget_kick_transfer(dep, uf, 1); | |
1399 | } | |
1400 | ||
1401 | static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep, | |
1402 | const struct dwc3_event_depevt *event) | |
1403 | { | |
1404 | struct dwc3 *dwc = dep->dwc; | |
1405 | struct dwc3_event_depevt mod_ev = *event; | |
1406 | ||
1407 | /* | |
1408 | * We were asked to remove one request. It is possible that this |
1409 | * request and a few others were started together and have the same |
1410 | * transfer index. Since we stopped the whole endpoint we don't |
1411 | * know how many requests were already completed (and not yet |
1412 | * reported) and how many could still complete (later). We purge |
1413 | * them all until the end of the list. |
1414 | */ | |
1415 | mod_ev.status = DEPEVT_STATUS_LST; | |
1416 | dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN); | |
1417 | dep->flags &= ~DWC3_EP_BUSY; | |
1418 | /* pending requests are ignored and are queued on XferNotReady */ |
1419 | ||
1420 | if (dep->flags & DWC3_EP_WILL_SHUTDOWN) { | |
1421 | while (!list_empty(&dep->req_queued)) { | |
1422 | struct dwc3_request *req; | |
1423 | ||
1424 | req = next_request(&dep->req_queued); | |
1425 | dwc3_gadget_giveback(dep, req, -ESHUTDOWN); | |
1426 | } | |
1427 | dep->flags &= ~DWC3_EP_WILL_SHUTDOWN; |
1428 | } |
1429 | } | |
1430 | ||
1431 | static void dwc3_ep_cmd_compl(struct dwc3_ep *dep, | |
1432 | const struct dwc3_event_depevt *event) | |
1433 | { | |
1434 | u32 param = event->parameters; | |
1435 | u32 cmd_type = (param >> 8) & ((1 << 5) - 1); | |
1436 | ||
1437 | switch (cmd_type) { | |
1438 | case DWC3_DEPCMD_ENDTRANSFER: | |
1439 | dwc3_process_ep_cmd_complete(dep, event); | |
1440 | break; | |
1441 | case DWC3_DEPCMD_STARTTRANSFER: | |
1442 | dep->res_trans_idx = param & 0x7f; | |
1443 | break; | |
1444 | default: | |
1445 | printk(KERN_ERR "%s() unknown/unexpected type: %d\n", |
1446 | __func__, cmd_type); | |
1447 | break; | |
1448 | } |
1449 | } | |
1450 | ||
1451 | static void dwc3_endpoint_interrupt(struct dwc3 *dwc, | |
1452 | const struct dwc3_event_depevt *event) | |
1453 | { | |
1454 | struct dwc3_ep *dep; | |
1455 | u8 epnum = event->endpoint_number; | |
1456 | ||
1457 | dep = dwc->eps[epnum]; | |
1458 | ||
1459 | dev_vdbg(dwc->dev, "%s: %s\n", dep->name, | |
1460 | dwc3_ep_event_string(event->endpoint_event)); | |
1461 | ||
1462 | if (epnum == 0 || epnum == 1) { | |
1463 | dwc3_ep0_interrupt(dwc, event); | |
1464 | return; | |
1465 | } | |
1466 | ||
1467 | switch (event->endpoint_event) { | |
1468 | case DWC3_DEPEVT_XFERCOMPLETE: | |
1469 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
1470 | dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n", | |
1471 | dep->name); | |
1472 | return; | |
1473 | } | |
1474 | ||
1475 | dwc3_endpoint_transfer_complete(dwc, dep, event, 1); | |
1476 | break; | |
1477 | case DWC3_DEPEVT_XFERINPROGRESS: | |
1478 | if (!usb_endpoint_xfer_isoc(dep->desc)) { | |
1479 | dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n", | |
1480 | dep->name); | |
1481 | return; | |
1482 | } | |
1483 | ||
1484 | dwc3_endpoint_transfer_complete(dwc, dep, event, 0); | |
1485 | break; | |
1486 | case DWC3_DEPEVT_XFERNOTREADY: | |
1487 | if (usb_endpoint_xfer_isoc(dep->desc)) { | |
1488 | dwc3_gadget_start_isoc(dwc, dep, event); | |
1489 | } else { | |
1490 | int ret; | |
1491 | ||
1492 | dev_vdbg(dwc->dev, "%s: reason %s\n", | |
1493 | dep->name, event->status | |
1494 | ? "Transfer Active" | |
1495 | : "Transfer Not Active"); | |
1496 | ||
1497 | ret = __dwc3_gadget_kick_transfer(dep, 0, 1); | |
1498 | if (!ret || ret == -EBUSY) | |
1499 | return; | |
1500 | ||
1501 | dev_dbg(dwc->dev, "%s: failed to kick transfers\n", | |
1502 | dep->name); | |
1503 | } | |
1504 | ||
1505 | break; | |
1506 | case DWC3_DEPEVT_RXTXFIFOEVT: | |
1507 | dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name); | |
1508 | break; | |
1509 | case DWC3_DEPEVT_STREAMEVT: | |
1510 | dev_dbg(dwc->dev, "%s Stream Event\n", dep->name); | |
1511 | break; | |
1512 | case DWC3_DEPEVT_EPCMDCMPLT: | |
1513 | dwc3_ep_cmd_compl(dep, event); | |
1514 | break; | |
1515 | } | |
1516 | } | |
1517 | ||
1518 | static void dwc3_disconnect_gadget(struct dwc3 *dwc) | |
1519 | { | |
1520 | if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { | |
1521 | spin_unlock(&dwc->lock); | |
1522 | dwc->gadget_driver->disconnect(&dwc->gadget); | |
1523 | spin_lock(&dwc->lock); | |
1524 | } | |
1525 | } | |
1526 | ||
1527 | static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum) | |
1528 | { | |
1529 | struct dwc3_ep *dep; | |
1530 | struct dwc3_gadget_ep_cmd_params params; | |
1531 | u32 cmd; | |
1532 | int ret; | |
1533 | ||
1534 | dep = dwc->eps[epnum]; | |
1535 | ||
1536 | if (dep->res_trans_idx) { | |
1537 | cmd = DWC3_DEPCMD_ENDTRANSFER; | |
1538 | cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC; | |
1539 | cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx); | |
1540 | memset(¶ms, 0, sizeof(params)); | |
1541 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); | |
1542 | WARN_ON_ONCE(ret); | |
1543 | dep->res_trans_idx = 0; |
1544 | } |
1545 | } | |
1546 | ||
1547 | static void dwc3_stop_active_transfers(struct dwc3 *dwc) | |
1548 | { | |
1549 | u32 epnum; | |
1550 | ||
1551 | for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1552 | struct dwc3_ep *dep; | |
1553 | ||
1554 | dep = dwc->eps[epnum]; | |
1555 | if (!(dep->flags & DWC3_EP_ENABLED)) | |
1556 | continue; | |
1557 | ||
1558 | __dwc3_gadget_ep_disable(dep); | |
1559 | } | |
1560 | } | |
1561 | ||
1562 | static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) | |
1563 | { | |
1564 | u32 epnum; | |
1565 | ||
1566 | for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | |
1567 | struct dwc3_ep *dep; | |
1568 | struct dwc3_gadget_ep_cmd_params params; | |
1569 | int ret; | |
1570 | ||
1571 | dep = dwc->eps[epnum]; | |
1572 | ||
1573 | if (!(dep->flags & DWC3_EP_STALL)) | |
1574 | continue; | |
1575 | ||
1576 | dep->flags &= ~DWC3_EP_STALL; | |
1577 | ||
1578 | memset(¶ms, 0, sizeof(params)); | |
1579 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | |
1580 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | |
1581 | WARN_ON_ONCE(ret); | |
1582 | } | |
1583 | } | |
1584 | ||
1585 | static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) | |
1586 | { | |
1587 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1588 | #if 0 | |
1589 | XXX | |
1590 | U1/U2 is a power-save optimization. Skip it for now. Anyway we need to |
1591 | enable it before we can disable it. | |
1592 | ||
1593 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1594 | reg &= ~DWC3_DCTL_INITU1ENA; | |
1595 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1596 | ||
1597 | reg &= ~DWC3_DCTL_INITU2ENA; | |
1598 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1599 | #endif | |
1600 | ||
1601 | dwc3_stop_active_transfers(dwc); | |
1602 | dwc3_disconnect_gadget(dwc); | |
1603 | ||
1604 | dwc->gadget.speed = USB_SPEED_UNKNOWN; | |
1605 | } | |
1606 | ||
1607 | static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on) | |
1608 | { | |
1609 | u32 reg; | |
1610 | ||
1611 | reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); | |
1612 | ||
1613 | if (on) | |
1614 | reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; | |
1615 | else | |
1616 | reg |= DWC3_GUSB3PIPECTL_SUSPHY; | |
1617 | ||
1618 | dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); | |
1619 | } | |
1620 | ||
1621 | static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on) | |
1622 | { | |
1623 | u32 reg; | |
1624 | ||
1625 | reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); | |
1626 | ||
1627 | if (on) | |
1628 | reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; | |
1629 | else | |
1630 | reg |= DWC3_GUSB2PHYCFG_SUSPHY; | |
1631 | ||
1632 | dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); | |
1633 | } | |
1634 | ||
1635 | static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) | |
1636 | { | |
1637 | u32 reg; | |
1638 | ||
1639 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1640 | ||
1641 | /* Enable PHYs */ | |
1642 | dwc3_gadget_usb2_phy_power(dwc, true); | |
1643 | dwc3_gadget_usb3_phy_power(dwc, true); | |
1644 | ||
1645 | if (dwc->gadget.speed != USB_SPEED_UNKNOWN) | |
1646 | dwc3_disconnect_gadget(dwc); | |
1647 | ||
1648 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | |
1649 | reg &= ~DWC3_DCTL_TSTCTRL_MASK; | |
1650 | dwc3_writel(dwc->regs, DWC3_DCTL, reg); | |
1651 | ||
1652 | dwc3_stop_active_transfers(dwc); | |
1653 | dwc3_clear_stall_all_ep(dwc); | |
1654 | ||
1655 | /* Reset device address to zero */ | |
1656 | reg = dwc3_readl(dwc->regs, DWC3_DCFG); | |
1657 | reg &= ~(DWC3_DCFG_DEVADDR_MASK); | |
1658 | dwc3_writel(dwc->regs, DWC3_DCFG, reg); | |
1659 | ||
1660 | /* | |
1661 | * Wait for RxFifo to drain | |
1662 | * | |
1663 | * REVISIT probably shouldn't wait forever. | |
1664 | * In case Hardware ends up in a screwed up | |
1665 | * case, we error out, notify the user and, | |
1666 | * maybe, WARN() or BUG() but leave the rest | |
1667 | * of the kernel working fine. | |
1668 | * | |
1669 | * REVISIT the below is rather CPU intensive, | |
1670 | * maybe we should read and if it doesn't work | |
1671 | * sleep (not busy wait) for a few useconds. | |
1672 | * | |
1673 | * REVISIT why wait until the RXFIFO is empty anyway? | |
1674 | */ | |
1675 | while (!(dwc3_readl(dwc->regs, DWC3_DSTS) | |
1676 | & DWC3_DSTS_RXFIFOEMPTY)) | |
1677 | cpu_relax(); | |
1678 | } | |
1679 | ||
1680 | static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) | |
1681 | { | |
1682 | u32 reg; | |
1683 | u32 usb30_clock = DWC3_GCTL_CLK_BUS; | |
1684 | ||
1685 | /* | |
1686 | * We change the clock only at SS but I dunno why I would want to do | |
1687 | * this. Maybe it becomes part of the power saving plan. | |
1688 | */ | |
1689 | ||
1690 | if (speed != DWC3_DSTS_SUPERSPEED) | |
1691 | return; | |
1692 | ||
1693 | /* | |
1694 | * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed | |
1695 | * each time on Connect Done. | |
1696 | */ | |
1697 | if (!usb30_clock) | |
1698 | return; | |
1699 | ||
1700 | reg = dwc3_readl(dwc->regs, DWC3_GCTL); | |
1701 | reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); | |
1702 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); | |
1703 | } | |
1704 | ||
1705 | static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed) | |
1706 | { | |
1707 | switch (speed) { | |
1708 | case USB_SPEED_SUPER: | |
1709 | dwc3_gadget_usb2_phy_power(dwc, false); | |
1710 | break; | |
1711 | case USB_SPEED_HIGH: | |
1712 | case USB_SPEED_FULL: | |
1713 | case USB_SPEED_LOW: | |
1714 | dwc3_gadget_usb3_phy_power(dwc, false); | |
1715 | break; | |
1716 | } | |
1717 | } | |
1718 | ||
1719 | static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) | |
1720 | { | |
1721 | struct dwc3_gadget_ep_cmd_params params; | |
1722 | struct dwc3_ep *dep; | |
1723 | int ret; | |
1724 | u32 reg; | |
1725 | u8 speed; | |
1726 | ||
1727 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1728 | ||
1729 | memset(&params, 0x00, sizeof(params)); | |
1730 | ||
1731 | dwc->ep0state = EP0_IDLE; | |
1732 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | |
1733 | speed = reg & DWC3_DSTS_CONNECTSPD; | |
1734 | dwc->speed = speed; | |
1735 | ||
1736 | dwc3_update_ram_clk_sel(dwc, speed); | |
1737 | ||
1738 | switch (speed) { | |
1739 | case DWC3_DCFG_SUPERSPEED: | |
1740 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); | |
1741 | dwc->gadget.ep0->maxpacket = 512; | |
1742 | dwc->gadget.speed = USB_SPEED_SUPER; | |
1743 | break; | |
1744 | case DWC3_DCFG_HIGHSPEED: | |
1745 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); | |
1746 | dwc->gadget.ep0->maxpacket = 64; | |
1747 | dwc->gadget.speed = USB_SPEED_HIGH; | |
1748 | break; | |
1749 | case DWC3_DCFG_FULLSPEED2: | |
1750 | case DWC3_DCFG_FULLSPEED1: | |
1751 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); | |
1752 | dwc->gadget.ep0->maxpacket = 64; | |
1753 | dwc->gadget.speed = USB_SPEED_FULL; | |
1754 | break; | |
1755 | case DWC3_DCFG_LOWSPEED: | |
1756 | dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); | |
1757 | dwc->gadget.ep0->maxpacket = 8; | |
1758 | dwc->gadget.speed = USB_SPEED_LOW; | |
1759 | break; | |
1760 | } | |
1761 | ||
1762 | /* Disable the unneeded PHY */ | |
1763 | dwc3_gadget_disable_phy(dwc, dwc->gadget.speed); | |
1764 | ||
1765 | dep = dwc->eps[0]; | |
1766 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc); | |
1767 | if (ret) { | |
1768 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1769 | return; | |
1770 | } | |
1771 | ||
1772 | dep = dwc->eps[1]; | |
1773 | ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc); | |
1774 | if (ret) { | |
1775 | dev_err(dwc->dev, "failed to enable %s\n", dep->name); | |
1776 | return; | |
1777 | } | |
1778 | ||
1779 | /* | |
1780 | * Configure PHY via GUSB3PIPECTLn if required. | |
1781 | * | |
1782 | * Update GTXFIFOSIZn | |
1783 | * | |
1784 | * In both cases reset values should be sufficient. | |
1785 | */ | |
1786 | } | |
1787 | ||
1788 | static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) | |
1789 | { | |
1790 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1791 | ||
1792 | /* | |
1793 | * TODO take core out of low power mode when that's | |
1794 | * implemented. | |
1795 | */ | |
1796 | ||
1797 | dwc->gadget_driver->resume(&dwc->gadget); | |
1798 | } | |
1799 | ||
1800 | static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, | |
1801 | unsigned int evtinfo) | |
1802 | { | |
1803 | dev_vdbg(dwc->dev, "%s\n", __func__); | |
1804 | ||
1805 | /* The fifth bit says SuperSpeed yes or no. */ | |
1806 | dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK; | |
1807 | } | |
1808 | ||
1809 | static void dwc3_gadget_interrupt(struct dwc3 *dwc, | |
1810 | const struct dwc3_event_devt *event) | |
1811 | { | |
1812 | switch (event->type) { | |
1813 | case DWC3_DEVICE_EVENT_DISCONNECT: | |
1814 | dwc3_gadget_disconnect_interrupt(dwc); | |
1815 | break; | |
1816 | case DWC3_DEVICE_EVENT_RESET: | |
1817 | dwc3_gadget_reset_interrupt(dwc); | |
1818 | break; | |
1819 | case DWC3_DEVICE_EVENT_CONNECT_DONE: | |
1820 | dwc3_gadget_conndone_interrupt(dwc); | |
1821 | break; | |
1822 | case DWC3_DEVICE_EVENT_WAKEUP: | |
1823 | dwc3_gadget_wakeup_interrupt(dwc); | |
1824 | break; | |
1825 | case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: | |
1826 | dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); | |
1827 | break; | |
1828 | case DWC3_DEVICE_EVENT_EOPF: | |
1829 | dev_vdbg(dwc->dev, "End of Periodic Frame\n"); | |
1830 | break; | |
1831 | case DWC3_DEVICE_EVENT_SOF: | |
1832 | dev_vdbg(dwc->dev, "Start of Periodic Frame\n"); | |
1833 | break; | |
1834 | case DWC3_DEVICE_EVENT_ERRATIC_ERROR: | |
1835 | dev_vdbg(dwc->dev, "Erratic Error\n"); | |
1836 | break; | |
1837 | case DWC3_DEVICE_EVENT_CMD_CMPL: | |
1838 | dev_vdbg(dwc->dev, "Command Complete\n"); | |
1839 | break; | |
1840 | case DWC3_DEVICE_EVENT_OVERFLOW: | |
1841 | dev_vdbg(dwc->dev, "Overflow\n"); | |
1842 | break; | |
1843 | default: | |
1844 | dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type); | |
1845 | } | |
1846 | } | |
1847 | ||
1848 | static void dwc3_process_event_entry(struct dwc3 *dwc, | |
1849 | const union dwc3_event *event) | |
1850 | { | |
1851 | /* Endpoint IRQ, handle it and return early */ | |
1852 | if (event->type.is_devspec == 0) { | |
1853 | /* depevt */ | |
1854 | return dwc3_endpoint_interrupt(dwc, &event->depevt); | |
1855 | } | |
1856 | ||
1857 | switch (event->type.type) { | |
1858 | case DWC3_EVENT_TYPE_DEV: | |
1859 | dwc3_gadget_interrupt(dwc, &event->devt); | |
1860 | break; | |
1861 | /* REVISIT what to do with Carkit and I2C events ? */ | |
1862 | default: | |
1863 | dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw); | |
1864 | } | |
1865 | } | |
1866 | ||
1867 | static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf) | |
1868 | { | |
1869 | struct dwc3_event_buffer *evt; | |
1870 | int left; | |
1871 | u32 count; | |
1872 | ||
1873 | count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)); | |
1874 | count &= DWC3_GEVNTCOUNT_MASK; | |
1875 | if (!count) | |
1876 | return IRQ_NONE; | |
1877 | ||
1878 | evt = dwc->ev_buffs[buf]; | |
1879 | left = count; | |
1880 | ||
1881 | while (left > 0) { | |
1882 | union dwc3_event event; | |
1883 | ||
1884 | memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw)); | |
1885 | dwc3_process_event_entry(dwc, &event); | |
1886 | /* | |
1887 | * XXX we wrap around correctly to the next entry as almost all | |
1888 | * entries are 4 bytes in size. There is one 12-byte entry: a | |
1889 | * regular entry followed by 8 bytes of data. It is not yet clear | |
1890 | * how such an entry is laid out when it crosses the buffer | |
1891 | * boundary; see the hedged copy sketch after this function. | |
1892 | */ | |
1893 | evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; | |
1894 | left -= 4; | |
1895 | ||
1896 | dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4); | |
1897 | } | |
1898 | ||
1899 | return IRQ_HANDLED; | |
1900 | } | |
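#if 0
/*
 * Hedged sketch only, for the XXX above: copy an event entry out of the
 * ring buffer in a wrap-aware way, so a 12-byte entry that straddles the
 * end of the buffer would still be read correctly. The helper name is an
 * assumption for illustration; detecting which event type actually carries
 * the extra 8 bytes is left out because this driver does not handle it yet.
 */
static void dwc3_copy_event(struct dwc3_event_buffer *evt, void *dst,
		unsigned int len)
{
	unsigned int first = min_t(unsigned int, len,
			DWC3_EVENT_BUFFERS_SIZE - evt->lpos);

	memcpy(dst, evt->buf + evt->lpos, first);
	if (first < len)
		memcpy(dst + first, evt->buf, len - first);

	evt->lpos = (evt->lpos + len) % DWC3_EVENT_BUFFERS_SIZE;
}
#endif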
1901 | ||
1902 | static irqreturn_t dwc3_interrupt(int irq, void *_dwc) | |
1903 | { | |
1904 | struct dwc3 *dwc = _dwc; | |
1905 | int i; | |
1906 | irqreturn_t ret = IRQ_NONE; | |
1907 | ||
1908 | spin_lock(&dwc->lock); | |
1909 | ||
1910 | for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) { | |
1911 | irqreturn_t status; | |
1912 | ||
1913 | status = dwc3_process_event_buf(dwc, i); | |
1914 | if (status == IRQ_HANDLED) | |
1915 | ret = status; | |
1916 | } | |
1917 | ||
1918 | spin_unlock(&dwc->lock); | |
1919 | ||
1920 | return ret; | |
1921 | } | |
1922 | ||
1923 | /** | |
1924 | * dwc3_gadget_init - Initializes gadget-related registers | |
1925 | * @dwc: Pointer to our controller context structure | |
1926 | * | |
1927 | * Returns 0 on success, otherwise negative errno. | |
1928 | */ | |
1929 | int __devinit dwc3_gadget_init(struct dwc3 *dwc) | |
1930 | { | |
1931 | u32 reg; | |
1932 | int ret; | |
1933 | int irq; | |
1934 | ||
1935 | dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), | |
1936 | &dwc->ctrl_req_addr, GFP_KERNEL); | |
1937 | if (!dwc->ctrl_req) { | |
1938 | dev_err(dwc->dev, "failed to allocate ctrl request\n"); | |
1939 | ret = -ENOMEM; | |
1940 | goto err0; | |
1941 | } | |
1942 | ||
1943 | dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb), | |
1944 | &dwc->ep0_trb_addr, GFP_KERNEL); | |
1945 | if (!dwc->ep0_trb) { | |
1946 | dev_err(dwc->dev, "failed to allocate ep0 trb\n"); | |
1947 | ret = -ENOMEM; | |
1948 | goto err1; | |
1949 | } | |
1950 | ||
1951 | dwc->setup_buf = dma_alloc_coherent(dwc->dev, | |
1952 | sizeof(*dwc->setup_buf) * 2, | |
1953 | &dwc->setup_buf_addr, GFP_KERNEL); | |
1954 | if (!dwc->setup_buf) { | |
1955 | dev_err(dwc->dev, "failed to allocate setup buffer\n"); | |
1956 | ret = -ENOMEM; | |
1957 | goto err2; | |
1958 | } | |
1959 | ||
5812b1c2 FB |
1960 | dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, |
1961 | 512, &dwc->ep0_bounce_addr, GFP_KERNEL); | |
1962 | if (!dwc->ep0_bounce) { | |
1963 | dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n"); | |
1964 | ret = -ENOMEM; | |
1965 | goto err3; | |
1966 | } | |
1967 | ||
72246da4 FB |
1968 | dev_set_name(&dwc->gadget.dev, "gadget"); |
1969 | ||
1970 | dwc->gadget.ops = &dwc3_gadget_ops; | |
1971 | dwc->gadget.is_dualspeed = true; | |
1972 | dwc->gadget.speed = USB_SPEED_UNKNOWN; | |
1973 | dwc->gadget.dev.parent = dwc->dev; | |
1974 | ||
1975 | dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask); | |
1976 | ||
1977 | dwc->gadget.dev.dma_parms = dwc->dev->dma_parms; | |
1978 | dwc->gadget.dev.dma_mask = dwc->dev->dma_mask; | |
1979 | dwc->gadget.dev.release = dwc3_gadget_release; | |
1980 | dwc->gadget.name = "dwc3-gadget"; | |
1981 | ||
1982 | /* | |
1983 | * REVISIT: Here we should clear all pending IRQs to be | |
1984 | * sure we're starting from a well known location. | |
1985 | */ | |
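#if 0
	/*
	 * Hedged sketch only, for the REVISIT above: acknowledge anything left
	 * in the event buffers by writing the pending byte count back to
	 * GEVNTCOUNT, which the controller treats as "bytes consumed" (the
	 * interrupt handler above relies on the same mechanism). Whether this
	 * is the right place to do it is an assumption, not a statement about
	 * the original driver.
	 */
	{
		int i;

		for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
			u32 count;

			count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(i));
			count &= DWC3_GEVNTCOUNT_MASK;
			if (count)
				dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i),
						count);
		}
	}
#endif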
1986 | ||
1987 | ret = dwc3_gadget_init_endpoints(dwc); | |
1988 | if (ret) | |
5812b1c2 | 1989 | goto err4; |
72246da4 FB |
1990 | |
1991 | irq = platform_get_irq(to_platform_device(dwc->dev), 0); | |
1992 | ||
1993 | ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED, | |
1994 | "dwc3", dwc); | |
1995 | if (ret) { | |
1996 | dev_err(dwc->dev, "failed to request irq #%d --> %d\n", | |
1997 | irq, ret); | |
5812b1c2 | 1998 | goto err5; |
72246da4 FB |
1999 | } |
2000 | ||
2001 | /* Enable all but Start and End of Frame IRQs */ | |
2002 | reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN | | |
2003 | DWC3_DEVTEN_EVNTOVERFLOWEN | | |
2004 | DWC3_DEVTEN_CMDCMPLTEN | | |
2005 | DWC3_DEVTEN_ERRTICERREN | | |
2006 | DWC3_DEVTEN_WKUPEVTEN | | |
2007 | DWC3_DEVTEN_ULSTCNGEN | | |
2008 | DWC3_DEVTEN_CONNECTDONEEN | | |
2009 | DWC3_DEVTEN_USBRSTEN | | |
2010 | DWC3_DEVTEN_DISCONNEVTEN); | |
2011 | dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); | |
2012 | ||
2013 | ret = device_register(&dwc->gadget.dev); | |
2014 | if (ret) { | |
2015 | dev_err(dwc->dev, "failed to register gadget device\n"); | |
2016 | put_device(&dwc->gadget.dev); | |
5812b1c2 | 2017 | goto err6; |
72246da4 FB |
2018 | } |
2019 | ||
2020 | ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); | |
2021 | if (ret) { | |
2022 | dev_err(dwc->dev, "failed to register udc\n"); | |
5812b1c2 | 2023 | goto err7; |
72246da4 FB |
2024 | } |
2025 | ||
2026 | return 0; | |
2027 | ||
5812b1c2 | 2028 | err7: |
72246da4 FB |
2029 | device_unregister(&dwc->gadget.dev); |
2030 | ||
5812b1c2 | 2031 | err6: |
72246da4 FB |
2032 | dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); |
2033 | free_irq(irq, dwc); | |
2034 | ||
5812b1c2 | 2035 | err5: |
72246da4 FB |
2036 | dwc3_gadget_free_endpoints(dwc); |
2037 | ||
5812b1c2 FB |
2038 | err4: |
2039 | dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce, | |
2040 | dwc->ep0_bounce_addr); | |
2041 | ||
72246da4 FB |
2042 | err3: |
2043 | dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2, | |
2044 | dwc->setup_buf, dwc->setup_buf_addr); | |
2045 | ||
2046 | err2: | |
2047 | dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), | |
2048 | dwc->ep0_trb, dwc->ep0_trb_addr); | |
2049 | ||
2050 | err1: | |
2051 | dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), | |
2052 | dwc->ctrl_req, dwc->ctrl_req_addr); | |
2053 | ||
2054 | err0: | |
2055 | return ret; | |
2056 | } | |
2057 | ||
2058 | void dwc3_gadget_exit(struct dwc3 *dwc) | |
2059 | { | |
2060 | int irq; | |
2061 | int i; | |
2062 | ||
2063 | usb_del_gadget_udc(&dwc->gadget); | |
2064 | irq = platform_get_irq(to_platform_device(dwc->dev), 0); | |
2065 | ||
2066 | dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00); | |
2067 | free_irq(irq, dwc); | |
2068 | ||
2069 | for (i = 0; i < ARRAY_SIZE(dwc->eps); i++) | |
2070 | __dwc3_gadget_ep_disable(dwc->eps[i]); | |
2071 | ||
2072 | dwc3_gadget_free_endpoints(dwc); | |
2073 | ||
5812b1c2 FB |
2074 | dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce, |
2075 | dwc->ep0_bounce_addr); | |
2076 | ||
72246da4 FB |
2077 | dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2, |
2078 | dwc->setup_buf, dwc->setup_buf_addr); | |
2079 | ||
2080 | dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), | |
2081 | dwc->ep0_trb, dwc->ep0_trb_addr); | |
2082 | ||
2083 | dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), | |
2084 | dwc->ctrl_req, dwc->ctrl_req_addr); | |
2085 | ||
2086 | device_unregister(&dwc->gadget.dev); | |
2087 | } |