/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <[email protected]>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


/*
 * The i.MX21 USB hardware contains
 * * 32 transfer descriptors (called ETDs)
 * * 4KB of data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 * * Allocating an ETD
 * * Filling in the ETD with appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Activating the ETD
 * * Getting an interrupt when done.
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and data memory) situations are handled differently for
 * isochronous and non-isochronous transactions:
 *
 * Non-ISOC transfers are queued if either ETDs or data memory are unavailable
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and data memory during URB submission
 * (and fail if either is unavailable).
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>

#include "imx21-hcd.h"

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}


/* =========================================== */
/* Hardware access helpers */
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}

static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (readl(reg) & mask)
		writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}

static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after */
	return (s16)((s16)after - (s16)frame) < 0;
}
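
/*
 * Worked example (illustration only): frame numbers are 16 bits wide,
 * so with after = 0xFFFE and frame = 0x0001 the subtraction wraps:
 * (s16)(0xFFFE - 0x0001) = (s16)0xFFFD, which is negative, so
 * frame_after(0x0001, 0xFFFE) is true and frame 1 is correctly treated
 * as coming later than frame 0xFFFE.
 */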

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}

static inline bool unsuitable_for_dma(dma_addr_t addr)
{
	return (addr & 3) != 0;
}
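
/*
 * The driver only considers 32-bit aligned addresses usable for the ETD
 * DMA engine; anything else falls back to PIO via data memory or to a
 * bounce buffer (see activate_etd() below).
 */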

#include "imx21-dbg.c"

static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);

/* =========================================== */
/* ETD management */
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}

static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
	etd->bounce_buffer = NULL;
}

static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}


static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
			1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}

/**
 * Copy a buffer to the data controller's data memory.
 * We cannot use memcpy_toio() because the hardware requires 32-bit writes
 */
static void copy_to_dmem(
	struct imx21 *imx21, int dmem_offset, void *src, int count)
{
	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
	u32 word = 0;
	u8 *p = src;
	int byte = 0;
	int i;

	for (i = 0; i < count; i++) {
		byte = i % 4;
		word += (*p++ << (byte * 8));
		if (byte == 3) {
			writel(word, dmem);
			dmem += 4;
			word = 0;
		}
	}

	if (count && byte != 3)
		writel(word, dmem);
}
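
/*
 * Worked example (illustration only): copy_to_dmem(imx21, off, src, 5)
 * packs the source bytes little-endian into 32-bit words and issues two
 * writel() calls: one full word for bytes 0-3, then a final partial
 * word whose bits 7:0 hold byte 4.
 */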

static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
		/* For non-aligned isoc the condition below is always true */
		if (etd->len <= etd->dmem_size) {
			/* Fits into data memory, use PIO */
			if (dir != TD_DIR_IN) {
				copy_to_dmem(imx21,
						etd->dmem_offset,
						etd->cpu_buffer, etd->len);
			}
			etd->dma_handle = 0;

		} else {
			/* Too big for data memory, use bounce buffer */
			enum dma_data_direction dmadir;

			if (dir == TD_DIR_IN) {
				dmadir = DMA_FROM_DEVICE;
				etd->bounce_buffer = kmalloc(etd->len,
								GFP_ATOMIC);
			} else {
				dmadir = DMA_TO_DEVICE;
				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
								etd->len,
								GFP_ATOMIC);
			}
			if (!etd->bounce_buffer) {
				dev_err(imx21->dev, "failed bounce alloc\n");
				goto err_bounce_alloc;
			}

			etd->dma_handle =
				dma_map_single(imx21->dev,
						etd->bounce_buffer,
						etd->len,
						dmadir);
			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
				dev_err(imx21->dev, "failed bounce map\n");
				goto err_bounce_map;
			}
		}
	}

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (etd->dma_handle) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP and PIO */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;

		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
	return;

err_bounce_map:
	kfree(etd->bounce_buffer);

err_bounce_alloc:
	free_dmem(imx21, etd);
	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}

/* =========================================== */
/* Data memory management */
/* =========================================== */

static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -ENOMEM;
}
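
/*
 * Note: dmem_list is kept sorted by offset, so the loop above is a
 * first-fit scan: it stops at the first gap between existing areas that
 * is big enough, and list_add_tail() on the entry it stopped at inserts
 * the new area just before that entry, preserving the ordering.
 */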

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
	    ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	etd->dmem_offset = dmem_offset;
	urb_priv->active = 1;
	activate_etd(imx21, etd_num, dir);
}

static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
	struct imx21_dmem_area *area;
	struct etd_priv *tmp;
	int found = 0;
	int offset;

	if (!etd->dmem_size)
		return;
	etd->dmem_size = 0;

	offset = etd->dmem_offset;
	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found) {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}

static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}


/* =========================================== */
/* Endpoint handling */
/* =========================================== */

/* Endpoint now idle - release its ETD(s) or assign them to a queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		int etd_num = ep_priv->etd[i];
		struct etd_priv *etd;
		if (etd_num < 0)
			continue;

		etd = &imx21->etd[etd_num];
		ep_priv->etd[i] = -1;

		free_dmem(imx21, etd); /* for isoc */

		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}

static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}

static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status)
{
	struct usb_host_endpoint *ep = etd->ep;

	urb_done(imx21->hcd, etd->urb, status);
	etd->urb = NULL;

	if (!list_empty(&ep->urb_list)) {
		struct urb *urb = list_first_entry(
			&ep->urb_list, struct urb, urb_list);

		dev_vdbg(imx21->dev, "next URB %p\n", urb);
		schedule_nonisoc_etd(imx21, urb);
	}
}


/* =========================================== */
/* ISOC Handling ... */
/* =========================================== */

static void schedule_isoc_etds(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = ep->hcpriv;
	struct etd_priv *etd;
	struct urb_priv *urb_priv;
	struct td *td;
	int etd_num;
	int i;
	int cur_frame;
	u8 dir;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
		if (list_empty(&ep_priv->td_list))
			break;

		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			break;

		etd = &imx21->etd[etd_num];
		if (etd->urb)
			continue;

		td = list_entry(ep_priv->td_list.next, struct td, list);
		list_del(&td->list);
		urb_priv = td->urb->hcpriv;

		cur_frame = imx21_hc_get_frame(hcd);
		if (frame_after(cur_frame, td->frame)) {
			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
				cur_frame, td->frame);
			urb_priv->isoc_status = -EXDEV;
			td->urb->iso_frame_desc[
				td->isoc_index].actual_length = 0;
			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
			if (--urb_priv->isoc_remaining == 0)
				urb_done(hcd, td->urb, urb_priv->isoc_status);
			goto too_late;
		}

		urb_priv->active = 1;
		etd->td = td;
		etd->ep = td->ep;
		etd->urb = td->urb;
		etd->len = td->len;
		etd->dma_handle = td->dma_handle;
		etd->cpu_buffer = td->cpu_buffer;

		debug_isoc_submitted(imx21, cur_frame, td);

		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
		etd_writel(imx21, etd_num, 2,
			(TD_NOTACCESSED << DW2_COMPCODE) |
			((td->frame & 0xFFFF) << DW2_STARTFRM));
		etd_writel(imx21, etd_num, 3,
			(TD_NOTACCESSED << DW3_COMPCODE0) |
			(td->len << DW3_PKTLEN0));

		activate_etd(imx21, etd_num, dir);
	}
}
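
/*
 * The NUM_ISO_ETDS (2) ETDs assigned to an isochronous endpoint are
 * filled round-robin above, giving the double buffering described in
 * the comment at the top of this file: while the hardware transfers
 * one packet the driver prepares the next.
 */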

static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int etd_mask = 1 << etd_num;
	struct etd_priv *etd = imx21->etd + etd_num;
	struct urb *urb = etd->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td = etd->td;
	struct usb_host_endpoint *ep = etd->ep;
	int isoc_index = td->isoc_index;
	unsigned int pipe = urb->pipe;
	int dir_in = usb_pipein(pipe);
	int cc;
	int bytes_xfrd;

	disactivate_etd(imx21, etd_num);

	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

	/* Input doesn't always fill the buffer, don't generate an error
	 * when this happens.
	 */
	if (dir_in && (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc == TD_NOTACCESSED)
		bytes_xfrd = 0;

	debug_isoc_completed(imx21,
		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
	if (cc) {
		urb_priv->isoc_status = -EXDEV;
		dev_dbg(imx21->dev,
			"bad iso cc=0x%X frame=%d sched frame=%d "
			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
			cc, imx21_hc_get_frame(hcd), td->frame,
			bytes_xfrd, td->len, urb, etd_num, isoc_index);
	}

	if (dir_in) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		if (!etd->dma_handle)
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
	}

	urb->actual_length += bytes_xfrd;
	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

	etd->td = NULL;
	etd->urb = NULL;
	etd->ep = NULL;

	if (--urb_priv->isoc_remaining == 0)
		urb_done(hcd, urb, urb_priv->isoc_status);

	schedule_isoc_etds(hcd, ep);
}

static struct ep_priv *alloc_isoc_ep(
	struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct ep_priv *ep_priv;
	int i;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; i++)
		ep_priv->etd[i] = -1;

	INIT_LIST_HEAD(&ep_priv->td_list);
	ep_priv->ep = ep;
	ep->hcpriv = ep_priv;
	return ep_priv;
}

static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i, j;
	int etd_num;

	/* Allocate the ETDs if required */
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		if (ep_priv->etd[i] < 0) {
			etd_num = alloc_etd(imx21);
			if (etd_num < 0)
				goto alloc_etd_failed;

			ep_priv->etd[i] = etd_num;
			imx21->etd[etd_num].ep = ep_priv->ep;
		}
	}
	return 0;

alloc_etd_failed:
	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
	for (j = 0; j < i; j++) {
		free_etd(imx21, ep_priv->etd[j]);
		ep_priv->etd[j] = -1;
	}
	return -ENOMEM;
}

static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
				     struct usb_host_endpoint *ep,
				     struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct urb_priv *urb_priv;
	unsigned long flags;
	struct ep_priv *ep_priv;
	struct td *td = NULL;
	int i;
	int ret;
	int cur_frame;
	u16 maxpacket;

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (urb_priv == NULL)
		return -ENOMEM;

	urb_priv->isoc_td = kzalloc(
		sizeof(struct td) * urb->number_of_packets, mem_flags);
	if (urb_priv->isoc_td == NULL) {
		ret = -ENOMEM;
		goto alloc_td_failed;
	}

	spin_lock_irqsave(&imx21->lock, flags);

	if (ep->hcpriv == NULL) {
		ep_priv = alloc_isoc_ep(imx21, ep);
		if (ep_priv == NULL) {
			ret = -ENOMEM;
			goto alloc_ep_failed;
		}
	} else {
		ep_priv = ep->hcpriv;
	}

	ret = alloc_isoc_etds(imx21, ep_priv);
	if (ret)
		goto alloc_etd_failed;

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto link_failed;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	/* allocate data memory for largest packets if not already done */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
			/* not sure if this can really occur.... */
			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
				etd->dmem_size, maxpacket);
			ret = -EMSGSIZE;
			goto alloc_dmem_failed;
		}

		if (etd->dmem_size == 0) {
			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
			if (etd->dmem_offset < 0) {
				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
				ret = -EAGAIN;
				goto alloc_dmem_failed;
			}
			etd->dmem_size = maxpacket;
		}
	}

	/* calculate frame */
	cur_frame = imx21_hc_get_frame(hcd);
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&ep_priv->td_list))
			urb->start_frame = cur_frame + 5;
		else
			urb->start_frame = list_entry(
				ep_priv->td_list.prev,
				struct td, list)->frame + urb->interval;
	}
	urb->start_frame = wrap_frame(urb->start_frame);
	if (frame_after(cur_frame, urb->start_frame)) {
		dev_dbg(imx21->dev,
			"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
			urb->start_frame, cur_frame,
			(urb->transfer_flags & URB_ISO_ASAP) != 0);
		urb->start_frame = wrap_frame(cur_frame + 1);
	}

	/* set up transfers */
	td = urb_priv->isoc_td;
	for (i = 0; i < urb->number_of_packets; i++, td++) {
		unsigned int offset = urb->iso_frame_desc[i].offset;
		td->ep = ep;
		td->urb = urb;
		td->len = urb->iso_frame_desc[i].length;
		td->isoc_index = i;
		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
		td->dma_handle = urb->transfer_dma + offset;
		td->cpu_buffer = urb->transfer_buffer + offset;
		list_add_tail(&td->list, &ep_priv->td_list);
	}

	urb_priv->isoc_remaining = urb->number_of_packets;
	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
		urb->number_of_packets, urb->start_frame, td->frame);

	debug_urb_submitted(imx21, urb);
	schedule_isoc_etds(hcd, ep);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

alloc_dmem_failed:
	usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_etd_failed:
alloc_ep_failed:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv->isoc_td);

alloc_td_failed:
	kfree(urb_priv);
	return ret;
}
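
/*
 * Note on URB_ISO_ASAP above: the first URB on an idle isochronous
 * endpoint is scheduled 5 frames into the future; later URBs chain one
 * interval after the last TD already queued, and anything that would
 * start in the past is pulled forward to the next frame.
 */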

static void dequeue_isoc_urb(struct imx21 *imx21,
	struct urb *urb, struct ep_priv *ep_priv)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td, *tmp;
	int i;

	if (urb_priv->active) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			int etd_num = ep_priv->etd[i];
			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
				struct etd_priv *etd = imx21->etd + etd_num;

				reset_etd(imx21, etd_num);
				free_dmem(imx21, etd);
			}
		}
	}

	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
		if (td->urb == urb) {
			dev_vdbg(imx21->dev, "removing td %p\n", td);
			list_del(&td->list);
		}
	}
}

/* =========================================== */
/* NON ISOC Handling ... */
/* =========================================== */

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
	int state = urb_priv->state;
	int etd_num = ep_priv->etd[0];
	struct etd_priv *etd;
	u32 count;
	u16 etd_buf_size;
	u16 maxpacket;
	u8 dir;
	u8 bufround;
	u8 datatoggle;
	u8 interval = 0;
	u8 relpolpos = 0;

	if (etd_num < 0) {
		dev_err(imx21->dev, "No valid ETD\n");
		return;
	}
	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

	etd = &imx21->etd[etd_num];
	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
	if (!maxpacket)
		maxpacket = 8;

	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
		if (state == US_CTRL_SETUP) {
			dir = TD_DIR_SETUP;
			if (unsuitable_for_dma(urb->setup_dma))
				unmap_urb_setup_for_dma(imx21->hcd, urb);
			etd->dma_handle = urb->setup_dma;
			etd->cpu_buffer = urb->setup_packet;
			bufround = 0;
			count = 8;
			datatoggle = TD_TOGGLE_DATA0;
		} else {	/* US_CTRL_ACK */
			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
			bufround = 0;
			count = 0;
			datatoggle = TD_TOGGLE_DATA1;
		}
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
		bufround = (dir == TD_DIR_IN) ? 1 : 0;
		if (unsuitable_for_dma(urb->transfer_dma))
			unmap_urb_for_dma(imx21->hcd, urb);

		etd->dma_handle = urb->transfer_dma;
		etd->cpu_buffer = urb->transfer_buffer;
		if (usb_pipebulk(pipe) && (state == US_BULK0))
			count = 0;
		else
			count = urb->transfer_buffer_length;

		if (usb_pipecontrol(pipe)) {
			datatoggle = TD_TOGGLE_DATA1;
		} else {
			if (usb_gettoggle(
					urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe)))
				datatoggle = TD_TOGGLE_DATA1;
			else
				datatoggle = TD_TOGGLE_DATA0;
		}
	}

	etd->urb = urb;
	etd->ep = urb_priv->ep;
	etd->len = count;

	if (usb_pipeint(pipe)) {
		interval = urb->interval;
		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
	}

	/* Write ETD to device memory */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

	etd_writel(imx21, etd_num, 2,
		(u32) interval << DW2_POLINTERV |
		((u32) relpolpos << DW2_RELPOLPOS) |
		((u32) dir << DW2_DIRPID) |
		((u32) bufround << DW2_BUFROUND) |
		((u32) datatoggle << DW2_DATATOG) |
		((u32) TD_NOTACCESSED << DW2_COMPCODE));

	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
	   is smaller. Make sure we don't overrun the buffer!
	 */
	if (count && count < maxpacket)
		etd_buf_size = count;
	else
		etd_buf_size = maxpacket;

	etd_writel(imx21, etd_num, 3,
		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

	if (!count)
		etd->dma_handle = 0;

	/* allocate x and y buffer space at once */
	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
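	/*
	 * Double buffering: the X buffer lives at dmem_offset and the Y
	 * buffer at dmem_offset + maxpacket, which is why DW1 below packs
	 * both start addresses into a single word (Y in the high half,
	 * via DW1_YBUFSRTAD).
	 */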
	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
	if (etd->dmem_offset < 0) {
		/* Setup everything we can in HW and update when we get DMEM */
		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
		debug_urb_queued_for_dmem(imx21, urb);
		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
		return;
	}

	etd_writel(imx21, etd_num, 1,
		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
		(u32) etd->dmem_offset);

	urb_priv->active = 1;

	/* enable the ETD to kick off transfer */
	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
	activate_etd(imx21, etd_num, dir);
}

static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct etd_priv *etd = &imx21->etd[etd_num];
	struct urb *urb = etd->urb;
	u32 etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	int dir;
	int cc;
	u32 bytes_xfrd;
	int etd_done;

	disactivate_etd(imx21, etd_num);

	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

	/* save toggle carry */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		      usb_pipeout(urb->pipe),
		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

	if (dir == TD_DIR_IN) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

		if (etd->bounce_buffer) {
			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
			dma_unmap_single(imx21->dev,
				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
		} else if (!etd->dma_handle && bytes_xfrd) { /* PIO */
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
		}
	}

	kfree(etd->bounce_buffer);
	etd->bounce_buffer = NULL;
	free_dmem(imx21, etd);

	urb->error_count = 0;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc != 0)
		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

	etd_done = (cc_to_error[cc] != 0);	/* stop if error */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		switch (urb_priv->state) {
		case US_CTRL_SETUP:
			if (urb->transfer_buffer_length > 0)
				urb_priv->state = US_CTRL_DATA;
			else
				urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_DATA:
			urb->actual_length += bytes_xfrd;
			urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_ACK:
			etd_done = 1;
			break;
		default:
			dev_err(imx21->dev,
				"Invalid pipe state %d\n", urb_priv->state);
			etd_done = 1;
			break;
		}
		break;

	case PIPE_BULK:
		urb->actual_length += bytes_xfrd;
		if ((urb_priv->state == US_BULK)
		    && (urb->transfer_flags & URB_ZERO_PACKET)
		    && urb->transfer_buffer_length > 0
		    && ((urb->transfer_buffer_length %
			 usb_maxpacket(urb->dev, urb->pipe,
				       usb_pipeout(urb->pipe))) == 0)) {
			/* need a 0-packet */
			urb_priv->state = US_BULK0;
		} else {
			etd_done = 1;
		}
		break;

	case PIPE_INTERRUPT:
		urb->actual_length += bytes_xfrd;
		etd_done = 1;
		break;
	}

	if (etd_done)
		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
	else {
		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
		schedule_nonisoc_etd(imx21, urb);
	}
}

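/*
 * Control transfers step through US_CTRL_SETUP -> US_CTRL_DATA (when a
 * data stage exists) -> US_CTRL_ACK, re-scheduling the same ETD for each
 * stage; bulk URBs with URB_ZERO_PACKET take one extra US_BULK0 pass to
 * send the terminating zero-length packet.
 */
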
static struct ep_priv *alloc_ep(void)
{
	int i;
	struct ep_priv *ep_priv;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; ++i)
		ep_priv->etd[i] = -1;

	return ep_priv;
}

static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;
	struct ep_priv *ep_priv;
	struct etd_priv *etd;
	int ret;
	unsigned long flags;

	dev_vdbg(imx21->dev,
		"enqueue urb=%p ep=%p len=%d "
		"buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
		urb, ep,
		urb->transfer_buffer_length,
		urb->transfer_buffer, urb->transfer_dma,
		urb->setup_packet, urb->setup_dma);

	if (usb_pipeisoc(urb->pipe))
		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	spin_lock_irqsave(&imx21->lock, flags);

	ep_priv = ep->hcpriv;
	if (ep_priv == NULL) {
		ep_priv = alloc_ep();
		if (!ep_priv) {
			ret = -ENOMEM;
			goto failed_alloc_ep;
		}
		ep->hcpriv = ep_priv;
		ep_priv->ep = ep;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto failed_link;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb_priv->state = US_CTRL_SETUP;
		break;
	case PIPE_BULK:
		urb_priv->state = US_BULK;
		break;
	}

	debug_urb_submitted(imx21, urb);
	if (ep_priv->etd[0] < 0) {
		if (ep_priv->waiting_etd) {
			dev_dbg(imx21->dev,
				"no ETD available already queued %p\n",
				ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			goto out;
		}
		ep_priv->etd[0] = alloc_etd(imx21);
		if (ep_priv->etd[0] < 0) {
			dev_dbg(imx21->dev,
				"no ETD available queueing %p\n", ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
			ep_priv->waiting_etd = 1;
			goto out;
		}
	}

	/* Schedule if no URB already active for this endpoint */
	etd = &imx21->etd[ep_priv->etd[0]];
	if (etd->urb == NULL) {
		DEBUG_LOG_FRAME(imx21, etd, last_req);
		schedule_nonisoc_etd(imx21, urb);
	}

out:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

failed_link:
failed_alloc_ep:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv);
	return ret;
}

static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				int status)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct usb_host_endpoint *ep;
	struct ep_priv *ep_priv;
	struct urb_priv *urb_priv = urb->hcpriv;
	int ret = -EINVAL;

	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
		urb, usb_pipeisoc(urb->pipe), status);

	spin_lock_irqsave(&imx21->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto fail;
	ep = urb_priv->ep;
	ep_priv = ep->hcpriv;

	debug_urb_unlinked(imx21, urb);

	if (usb_pipeisoc(urb->pipe)) {
		dequeue_isoc_urb(imx21, urb, ep_priv);
		schedule_isoc_etds(hcd, ep);
	} else if (urb_priv->active) {
		int etd_num = ep_priv->etd[0];
		if (etd_num != -1) {
			struct etd_priv *etd = &imx21->etd[etd_num];

			disactivate_etd(imx21, etd_num);
			free_dmem(imx21, etd);
			etd->urb = NULL;
			kfree(etd->bounce_buffer);
			etd->bounce_buffer = NULL;
		}
	}

	urb_done(hcd, urb, status);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return ret;
}

/* =========================================== */
/* Interrupt dispatch */
/* =========================================== */

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
	int etd_num;
	int enable_sof_int = 0;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
		u32 etd_mask = 1 << etd_num;
		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
		struct etd_priv *etd = &imx21->etd[etd_num];


		if (done) {
			DEBUG_LOG_FRAME(imx21, etd, last_int);
		} else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait)
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (eg mass storage) even just test1 will hang without
 * the kludge.
 */
			u32 dword0;
			int cc;

			if (etd->active_count && !enabled) /* suspicious... */
				enable_sof_int = 1;

			if (!sof || enabled || !etd->active_count)
				continue;

			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
			if (cc == TD_NOTACCESSED)
				continue;

			if (++etd->active_count < 10)
				continue;

			dword0 = etd_readl(imx21, etd_num, 0);
			dev_dbg(imx21->dev,
				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
				etd_num, dword0 & 0x7F,
				(dword0 >> DW0_ENDPNT) & 0x0F,
				cc);

#ifdef DEBUG
			dev_dbg(imx21->dev,
				"frame: act=%d disact=%d"
				" int=%d req=%d cur=%d\n",
				etd->activated_frame,
				etd->disactivated_frame,
				etd->last_int_frame,
				etd->last_req_frame,
				readl(imx21->regs + USBH_FRMNUB));
			imx21->debug_unblocks++;
#endif
			etd->active_count = 0;
/* End of kludge */
		}

		if (etd->ep == NULL || etd->urb == NULL) {
			dev_dbg(imx21->dev,
				"Interrupt for unexpected etd %d"
				" ep=%p urb=%p\n",
				etd_num, etd->ep, etd->urb);
			disactivate_etd(imx21, etd_num);
			continue;
		}

		if (usb_pipeisoc(etd->urb->pipe))
			isoc_etd_done(hcd, etd_num);
		else
			nonisoc_etd_done(hcd, etd_num);
	}

	/* only enable SOF interrupt if it may be needed for the kludge */
	if (enable_sof_int)
		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
	else
		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);


	spin_unlock_irqrestore(&imx21->lock, flags);
}

static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	u32 ints = readl(imx21->regs + USBH_SYSISR);

	if (ints & USBH_SYSIEN_HERRINT)
		dev_dbg(imx21->dev, "Scheduling error\n");

	if (ints & USBH_SYSIEN_SORINT)
		dev_dbg(imx21->dev, "Scheduling overrun\n");

	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

	writel(ints, imx21->regs + USBH_SYSISR);
	return IRQ_HANDLED;
}

static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
				      struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct ep_priv *ep_priv;
	int i;

	if (ep == NULL)
		return;

	spin_lock_irqsave(&imx21->lock, flags);
	ep_priv = ep->hcpriv;
	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

	if (!list_empty(&ep->urb_list))
		dev_dbg(imx21->dev, "ep's URB list is not empty\n");

	if (ep_priv != NULL) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			if (ep_priv->etd[i] > -1)
				dev_dbg(imx21->dev, "free etd %d for disable\n",
					ep_priv->etd[i]);

			free_etd(imx21, ep_priv->etd[i]);
		}
		kfree(ep_priv);
		ep->hcpriv = NULL;
	}

	for (i = 0; i < USB_NUM_ETD; i++) {
		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
			dev_err(imx21->dev,
				"Active etd %d for disabled ep=%p!\n", i, ep);
			free_etd(imx21, i);
		}
	}
	free_epdmem(imx21, ep);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Hub handling */
/* =========================================== */

static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = 0x29;	/* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		0x0002 |	/* No power switching */
		0x0010 |	/* No over current protection */
		0);

	desc->bitmap[0] = 1 << 1;
	desc->bitmap[1] = ~0;
	return 0;
}

static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		dev_err(imx21->dev, "ports %d > 7\n", ports);
		ports = 7;
	}
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			(USBH_PORTSTAT_CONNECTSC |
			USBH_PORTSTAT_PRTENBLSC |
			USBH_PORTSTAT_PRTSTATSC |
			USBH_PORTSTAT_OVRCURIC |
			USBH_PORTSTAT_PRTRSTSC)) {

			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}

static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, "    ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, "    C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, "    C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, "    C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, "    C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, "  GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
		    wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;

		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, "    RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, "    unknown\n");
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}
1648 | ||
1649 | /* =========================================== */ | |
1650 | /* Host controller management */ | |
1651 | /* =========================================== */ | |
1652 | ||
1653 | static int imx21_hc_reset(struct usb_hcd *hcd) | |
1654 | { | |
1655 | struct imx21 *imx21 = hcd_to_imx21(hcd); | |
1656 | unsigned long timeout; | |
1657 | unsigned long flags; | |
1658 | ||
1659 | spin_lock_irqsave(&imx21->lock, flags); | |
1660 | ||
b595076a | 1661 | /* Reset the Host controller modules */ |
1662 | writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH | |
1663 | USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC, | |
1664 | imx21->regs + USBOTG_RST_CTRL); | |
1665 | ||
1666 | /* Wait for reset to finish */ | |
1667 | timeout = jiffies + HZ; | |
1668 | while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) { | |
1669 | if (time_after(jiffies, timeout)) { | |
1670 | spin_unlock_irqrestore(&imx21->lock, flags); | |
1671 | dev_err(imx21->dev, "timeout waiting for reset\n"); | |
1672 | return -ETIMEDOUT; | |
1673 | } | |
1674 | spin_unlock_irq(&imx21->lock); | |
9a4b7c3b | 1675 | schedule_timeout_uninterruptible(1); |
1676 | spin_lock_irq(&imx21->lock); |
1677 | } | |
1678 | spin_unlock_irqrestore(&imx21->lock, flags); | |
1679 | return 0; | |
1680 | } | |
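/*
 * Note: the reset poll above drops imx21->lock around each sleep, since
 * schedule_timeout_uninterruptible() must not be called with a spinlock
 * held.  Using the plain _irq lock variants inside the loop assumes
 * .reset is only ever called from process context with interrupts
 * enabled, which holds for the usb_add_hcd() path.
 */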
1681 | ||
1682 | static int __devinit imx21_hc_start(struct usb_hcd *hcd) | |
1683 | { | |
1684 | struct imx21 *imx21 = hcd_to_imx21(hcd); | |
1685 | unsigned long flags; | |
1686 | int i, j; | |
1687 | u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST; | |
1688 | u32 usb_control = 0; | |
1689 | ||
1690 | hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) & | |
1691 | USBOTG_HWMODE_HOSTXCVR_MASK); | |
1692 | hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) & | |
1693 | USBOTG_HWMODE_OTGXCVR_MASK); | |
1694 | ||
1695 | if (imx21->pdata->host1_txenoe) | |
1696 | usb_control |= USBCTRL_HOST1_TXEN_OE; | |
1697 | ||
1698 | if (!imx21->pdata->host1_xcverless) | |
1699 | usb_control |= USBCTRL_HOST1_BYP_TLL; | |
1700 | ||
1701 | if (imx21->pdata->otg_ext_xcvr) | |
1702 | usb_control |= USBCTRL_OTC_RCV_RXDP; | |
1703 | ||
1704 | ||
1705 | spin_lock_irqsave(&imx21->lock, flags); | |
1706 | ||
1707 | writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN), | |
1708 | imx21->regs + USBOTG_CLK_CTRL); | |
1709 | writel(hw_mode, imx21->regs + USBOTG_HWMODE); | |
1710 | writel(usb_control, imx21->regs + USBCTRL); | |
1711 | writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE, | |
1712 | imx21->regs + USB_MISCCONTROL); | |
1713 | ||
1714 | /* Clear the ETDs */ | |
1715 | for (i = 0; i < USB_NUM_ETD; i++) | |
1716 | for (j = 0; j < 4; j++) | |
1717 | etd_writel(imx21, i, j, 0); | |
1718 | ||
1719 | /* Take the HC out of reset */ | |
1720 | writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1, | |
1721 | imx21->regs + USBH_HOST_CTRL); | |
1722 | ||
1723 | /* Enable ports */ | |
1724 | if (imx21->pdata->enable_otg_host) | |
1725 | writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, | |
1726 | imx21->regs + USBH_PORTSTAT(0)); | |
1727 | ||
1728 | if (imx21->pdata->enable_host1) | |
1729 | writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, | |
1730 | imx21->regs + USBH_PORTSTAT(1)); | |
1731 | ||
1732 | if (imx21->pdata->enable_host2) | |
1733 | writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, | |
1734 | imx21->regs + USBH_PORTSTAT(2)); | |
1735 | ||
1736 | ||
1737 | hcd->state = HC_STATE_RUNNING; | |
1738 | ||
1739 | /* Enable host controller interrupts */ | |
1740 | set_register_bits(imx21, USBH_SYSIEN, | |
1741 | USBH_SYSIEN_HERRINT | | |
1742 | USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT); | |
1743 | set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); | |
1744 | ||
1745 | spin_unlock_irqrestore(&imx21->lock, flags); | |
1746 | ||
1747 | return 0; | |
1748 | } | |
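/*
 * Transceiver selection above packs the platform-supplied modes into
 * USBOTG_HWMODE by shift-and-mask, e.g.:
 *
 *	hw_mode |= (imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT)
 *			& USBOTG_HWMODE_HOSTXCVR_MASK;
 *
 * Each of the three root hub ports (OTG, host1, host2) is then powered
 * and enabled individually, so boards may expose any subset of them.
 */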
1749 | ||
1750 | static void imx21_hc_stop(struct usb_hcd *hcd) | |
1751 | { | |
1752 | struct imx21 *imx21 = hcd_to_imx21(hcd); | |
1753 | unsigned long flags; | |
1754 | ||
1755 | spin_lock_irqsave(&imx21->lock, flags); | |
1756 | ||
1757 | writel(0, imx21->regs + USBH_SYSIEN); | |
1758 | clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); | |
1759 | clear_register_bits(imx21, USBOTG_CLK_CTRL, | |
1760 | USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN); | |
1761 | spin_unlock_irqrestore(&imx21->lock, flags); | |
1762 | } | |
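/*
 * Teardown order in imx21_hc_stop() matters: all interrupt sources are
 * masked (USBH_SYSIEN zeroed, the HC interrupt disabled in
 * USBOTG_CINT_STEN) before the host and main clocks are gated, so no
 * interrupt can fire once the clocks are off.
 */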
1763 | ||
1764 | /* =========================================== */ | |
1765 | /* Driver glue */ | |
1766 | /* =========================================== */ | |
1767 | ||
1768 | static struct hc_driver imx21_hc_driver = { | |
1769 | .description = hcd_name, | |
1770 | .product_desc = "IMX21 USB Host Controller", | |
1771 | .hcd_priv_size = sizeof(struct imx21), | |
1772 | ||
1773 | .flags = HCD_USB11, | |
1774 | .irq = imx21_irq, | |
1775 | ||
1776 | .reset = imx21_hc_reset, | |
1777 | .start = imx21_hc_start, | |
1778 | .stop = imx21_hc_stop, | |
1779 | ||
1780 | /* I/O requests */ | |
1781 | .urb_enqueue = imx21_hc_urb_enqueue, | |
1782 | .urb_dequeue = imx21_hc_urb_dequeue, | |
1783 | .endpoint_disable = imx21_hc_endpoint_disable, | |
1784 | ||
1785 | /* scheduling support */ | |
1786 | .get_frame_number = imx21_hc_get_frame, | |
1787 | ||
1788 | /* Root hub support */ | |
1789 | .hub_status_data = imx21_hc_hub_status_data, | |
1790 | .hub_control = imx21_hc_hub_control, | |
1791 | ||
1792 | }; | |
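/*
 * HCD_USB11 tells usbcore this is a full/low-speed-only (USB 1.1)
 * controller.  No bus_suspend/bus_resume methods are provided, so root
 * hub suspend is not supported by this driver.
 */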
1793 | ||
1794 | static struct mx21_usbh_platform_data default_pdata = { | |
1795 | .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF, | |
1796 | .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF, | |
1797 | .enable_host1 = 1, | |
1798 | .enable_host2 = 1, | |
1799 | .enable_otg_host = 1, | |
1800 | ||
1801 | }; | |
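/*
 * Fallback configuration used when a board registers the device without
 * platform data: differential transmit and receive on both the host and
 * OTG transceivers, with all three ports enabled.
 */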
1802 | ||
1803 | static int imx21_remove(struct platform_device *pdev) | |
1804 | { | |
1805 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | |
1806 | struct imx21 *imx21 = hcd_to_imx21(hcd); | |
1807 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1808 | ||
1809 | remove_debug_files(imx21); | |
1810 | usb_remove_hcd(hcd); | |
1811 | ||
1812 | if (res != NULL) { | |
1813 | clk_disable(imx21->clk); | |
1814 | clk_put(imx21->clk); | |
1815 | iounmap(imx21->regs); | |
1816 | release_mem_region(res->start, resource_size(res)); | |
1817 | } | |
1818 | ||
1819 | usb_put_hcd(hcd); /* not kfree(): hcd is refcounted by usb_create_hcd() */ | |
1820 | return 0; | |
1821 | } | |
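/*
 * imx21_remove() unwinds probe in reverse: the debugfs entries and the
 * HCD go first (usb_remove_hcd() stops the controller via
 * imx21_hc_stop()), then the clock, register mapping and memory region,
 * and finally the last reference to the hcd structure is dropped.
 */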
1822 | ||
1823 | ||
1824 | static int imx21_probe(struct platform_device *pdev) | |
1825 | { | |
1826 | struct usb_hcd *hcd; | |
1827 | struct imx21 *imx21; | |
1828 | struct resource *res; | |
1829 | int ret; | |
1830 | int irq; | |
1831 | ||
1832 | printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc); | |
1833 | ||
1834 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1835 | if (!res) | |
1836 | return -ENODEV; | |
1837 | irq = platform_get_irq(pdev, 0); | |
1838 | if (irq < 0) | |
1839 | return -ENXIO; | |
1840 | ||
1841 | hcd = usb_create_hcd(&imx21_hc_driver, | |
1842 | &pdev->dev, dev_name(&pdev->dev)); | |
1843 | if (hcd == NULL) { | |
1844 | dev_err(&pdev->dev, "Cannot create hcd (%s)\n", | |
1845 | dev_name(&pdev->dev)); | |
1846 | return -ENOMEM; | |
1847 | } | |
1848 | ||
1849 | imx21 = hcd_to_imx21(hcd); | |
d0cc3d41 | 1850 | imx21->hcd = hcd; |
1851 | imx21->dev = &pdev->dev; |
1852 | imx21->pdata = pdev->dev.platform_data; | |
1853 | if (!imx21->pdata) | |
1854 | imx21->pdata = &default_pdata; | |
1855 | ||
1856 | spin_lock_init(&imx21->lock); | |
1857 | INIT_LIST_HEAD(&imx21->dmem_list); | |
1858 | INIT_LIST_HEAD(&imx21->queue_for_etd); | |
1859 | INIT_LIST_HEAD(&imx21->queue_for_dmem); | |
1860 | create_debug_files(imx21); | |
1861 | ||
1862 | res = request_mem_region(res->start, resource_size(res), hcd_name); | |
1863 | if (!res) { | |
1864 | ret = -EBUSY; | |
1865 | goto failed_request_mem; | |
1866 | } | |
1867 | ||
1868 | imx21->regs = ioremap(res->start, resource_size(res)); | |
1869 | if (imx21->regs == NULL) { | |
1870 | dev_err(imx21->dev, "Cannot map registers\n"); | |
1871 | ret = -ENOMEM; | |
1872 | goto failed_ioremap; | |
1873 | } | |
1874 | ||
1875 | /* Enable the clock source */ | |
1876 | imx21->clk = clk_get(imx21->dev, NULL); | |
1877 | if (IS_ERR(imx21->clk)) { | |
1878 | dev_err(imx21->dev, "no clock found\n"); | |
1879 | ret = PTR_ERR(imx21->clk); | |
1880 | goto failed_clock_get; | |
1881 | } | |
1882 | ||
1883 | ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000)); | |
1884 | if (ret) | |
1885 | goto failed_clock_set; | |
1886 | ret = clk_enable(imx21->clk); | |
1887 | if (ret) | |
1888 | goto failed_clock_enable; | |
1889 | ||
1890 | dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n", | |
1891 | (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF); | |
1892 | ||
1893 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); | |
1894 | if (ret != 0) { | |
1895 | dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret); | |
1896 | goto failed_add_hcd; | |
1897 | } | |
1898 | ||
1899 | return 0; | |
1900 | ||
1901 | failed_add_hcd: | |
1902 | clk_disable(imx21->clk); | |
1903 | failed_clock_enable: | |
1904 | failed_clock_set: | |
1905 | clk_put(imx21->clk); | |
1906 | failed_clock_get: | |
1907 | iounmap(imx21->regs); | |
1908 | failed_ioremap: | |
e581c8c8 | 1909 | release_mem_region(res->start, resource_size(res)); |
1910 | failed_request_mem: |
1911 | remove_debug_files(imx21); | |
1912 | usb_put_hcd(hcd); | |
1913 | return ret; | |
1914 | } | |
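/*
 * The goto-based unwinding above releases resources in strict reverse
 * order of acquisition: each failure label undoes only what had already
 * been set up when the failing step was reached, ending with
 * usb_put_hcd() to balance usb_create_hcd().
 */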
1915 | ||
1916 | static struct platform_driver imx21_hcd_driver = { | |
1917 | .driver = { | |
1918 | .name = (char *)hcd_name, | |
1919 | }, | |
1920 | .probe = imx21_probe, | |
1921 | .remove = imx21_remove, | |
1922 | .suspend = NULL, | |
1923 | .resume = NULL, | |
1924 | }; | |
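/*
 * The platform bus binds this driver to its device purely by name
 * ("imx21-hcd"); the MODULE_ALIAS() below advertises the same name for
 * automatic module loading.
 */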
1925 | ||
1926 | static int __init imx21_hcd_init(void) | |
1927 | { | |
1928 | return platform_driver_register(&imx21_hcd_driver); | |
1929 | } | |
1930 | ||
1931 | static void __exit imx21_hcd_cleanup(void) | |
1932 | { | |
1933 | platform_driver_unregister(&imx21_hcd_driver); | |
1934 | } | |
1935 | ||
1936 | module_init(imx21_hcd_init); | |
1937 | module_exit(imx21_hcd_cleanup); | |
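/*
 * On kernels that provide the module_platform_driver() helper, the
 * init/exit boilerplate above could be collapsed to a single line:
 *
 *	module_platform_driver(imx21_hcd_driver);
 */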
1938 | ||
1939 | MODULE_DESCRIPTION("i.MX21 USB Host controller"); | |
1940 | MODULE_AUTHOR("Martin Fuzzey"); | |
1941 | MODULE_LICENSE("GPL"); | |
1942 | MODULE_ALIAS("platform:imx21-hcd"); |