// SPDX-License-Identifier: GPL-2.0+
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <[email protected]>
 *	    Vikas Sajjan <[email protected]>
 */

#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/bug.h>
#include <linux/errno.h>

#include <usb/xhci.h>

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE
/**
 * Flushes the dcache over the memory region passed
 *
 * @param addr	start address of the memory region to be flushed
 * @param len	length in bytes of the memory region to be flushed
 * Return: none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

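	/*
	 * Round the start address down and the end address up to whole
	 * cache lines; flush_dcache_range() expects cache-line-aligned
	 * boundaries on most architectures.
	 */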
	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
			   ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Invalidates the dcache over the memory region passed
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length in bytes of the memory region to be invalidated
 * Return: none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Frees the segment pointer passed
 *
 * @param ctrl	host controller data structure
 * @param seg	pointer to the segment to be freed
 * Return: none
 */
static void xhci_segment_free(struct xhci_ctrl *ctrl, struct xhci_segment *seg)
{
	xhci_dma_unmap(ctrl, seg->dma, SEGMENT_SIZE);
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}

/**
 * Frees the ring pointer passed
 *
 * @param ctrl	host controller data structure
 * @param ring	pointer to the ring to be freed
 * Return: none
 */
static void xhci_ring_free(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(ctrl, seg);
		seg = next;
	}
	xhci_segment_free(ctrl, first_seg);

	free(ring);
}

/**
 * Free the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 * Return: none
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	int num_sp;

	if (!ctrl->scratchpad)
		return;

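	/*
	 * dev_context_ptrs[0] holds the DMA address of the scratchpad
	 * array, and sp_array[0] the DMA address of the first scratchpad
	 * buffer page; both mappings are torn down here.
	 */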
	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	xhci_dma_unmap(ctrl, ctrl->scratchpad->sp_array[0],
		       num_sp * ctrl->page_size);
	xhci_dma_unmap(ctrl, ctrl->dcbaa->dev_context_ptrs[0],
		       num_sp * sizeof(u64));
	ctrl->dcbaa->dev_context_ptrs[0] = 0;

	free(ctrl->scratchpad->scratchpad);
	free(ctrl->scratchpad->sp_array);
	free(ctrl->scratchpad);
	ctrl->scratchpad = NULL;
}

/**
 * Frees the xhci_container_ctx pointer passed
 *
 * @param ctrl	host controller data structure
 * @param ctx	pointer to the xhci_container_ctx to be freed
 * Return: none
 */
static void xhci_free_container_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx)
{
	xhci_dma_unmap(ctrl, ctx->dma, ctx->size);
	free(ctx->bytes);
	free(ctx);
}

/**
 * Frees the virtual devices of the xhci_ctrl pointer passed
 *
 * @param ctrl	pointer to the xhci_ctrl whose virtual devices are to be freed
 * Return: none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * Loop through all the virtual devices;
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(ctrl, virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(ctrl, virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(ctrl, virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}

/**
 * Frees all the memory allocated
 *
 * @param ctrl	pointer to the xhci_ctrl to be cleaned up
 * Return: none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl, ctrl->event_ring);
	xhci_ring_free(ctrl, ctrl->cmd_ring);
	xhci_scratchpad_free(ctrl);
	xhci_free_virt_devices(ctrl);
	xhci_dma_unmap(ctrl, ctrl->erst.erst_dma_addr,
		       sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);
	free(ctrl->erst.entries);
	xhci_dma_unmap(ctrl, ctrl->dcbaa->dma,
		       sizeof(struct xhci_device_context_array));
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Mallocs a block of cache-line-aligned, zeroed memory
 *
 * @param size	size of the memory to be allocated
 * Return: pointer to the allocated, aligned memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

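	/*
	 * Flush the zeroed buffer out to memory so the controller, which
	 * reads it via DMA, never sees stale cache contents.
	 */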
	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param ctrl	host controller data structure
 * @param prev	pointer to the previous segment
 * @param next	pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the TRBs or not
 * Return: none
 */
static void xhci_link_segments(struct xhci_ctrl *ctrl, struct xhci_segment *prev,
			       struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/**
 * Initialises the ring's enqueue, dequeue and segment pointers
 *
 * @param ring	pointer to the ring to be initialised
 * Return: none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}

/**
 * Allocates a generic ring segment, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param ctrl	host controller data structure
 * Return: pointer to the newly allocated segment
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_segment *seg;

	seg = malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = xhci_malloc(SEGMENT_SIZE);
	seg->dma = xhci_dma_map(ctrl, seg->trbs, SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param ctrl	host controller data structure
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the TRBs or not
 * Return: pointer to the newly created ring
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_ctrl *ctrl, unsigned int num_segs,
				  bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(ctrl);
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(ctrl);
		BUG_ON(!next);

		xhci_link_segments(ctrl, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(ctrl, prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}
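
/*
 * As used later in this file: the command ring and the default endpoint
 * ring are single-segment rings with link TRBs, e.g.
 *
 *	ring = xhci_ring_alloc(ctrl, 1, true);
 *
 * while the event ring is allocated with link_trbs == false, since its
 * segments are described by the event ring segment table (ERST) instead.
 */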

/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 * Return: -ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	struct xhci_hcor *hcor = ctrl->hcor;
	struct xhci_scratchpad *scratchpad;
	uint64_t val_64;
	int num_sp;
	uint32_t page_size;
	void *buf;
	int i;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	if (!num_sp)
		return 0;

	scratchpad = malloc(sizeof(*scratchpad));
	if (!scratchpad)
		goto fail_sp;
	ctrl->scratchpad = scratchpad;

	scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
	if (!scratchpad->sp_array)
		goto fail_sp2;

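	/* DCBAA entry 0 is reserved to point at the scratchpad array */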
	val_64 = xhci_dma_map(ctrl, scratchpad->sp_array,
			      num_sp * sizeof(u64));
	ctrl->dcbaa->dev_context_ptrs[0] = cpu_to_le64(val_64);

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[0],
			 sizeof(ctrl->dcbaa->dev_context_ptrs[0]));

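	/*
	 * The PAGESIZE register reports the supported page size as a bit
	 * field: bit n set means 2^(n + 12) bytes, so locate the lowest
	 * set bit to derive the page size.
	 */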
	page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	BUG_ON(i == 16);

	ctrl->page_size = 1 << (i + 12);
	buf = memalign(ctrl->page_size, num_sp * ctrl->page_size);
	if (!buf)
		goto fail_sp3;
	memset(buf, '\0', num_sp * ctrl->page_size);
	xhci_flush_cache((uintptr_t)buf, num_sp * ctrl->page_size);

	scratchpad->scratchpad = buf;
	val_64 = xhci_dma_map(ctrl, buf, num_sp * ctrl->page_size);
	for (i = 0; i < num_sp; i++) {
		scratchpad->sp_array[i] = cpu_to_le64(val_64);
		val_64 += ctrl->page_size;
	}

	xhci_flush_cache((uintptr_t)scratchpad->sp_array,
			 sizeof(u64) * num_sp);

	return 0;

fail_sp3:
	free(scratchpad->sp_array);

fail_sp2:
	free(scratchpad);
	ctrl->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

/**
 * Allocates the container context
 *
 * @param ctrl	host controller data structure
 * @param type	type of xHCI container context
 * Return: NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
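	/*
	 * A context holds one slot context plus MAX_EP_CTX_NUM endpoint
	 * contexts; an input context additionally carries the input
	 * control context at its start, hence one extra CTX_SIZE.
	 */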
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = xhci_malloc(ctx->size);
	ctx->dma = xhci_dma_map(ctrl, ctx->bytes, ctx->size);

	return ctx;
}

/**
 * Allocates the virtual device for the given slot
 *
 * @param ctrl	host controller data structure
 * @param slot_id	slot ID of the virtual device
 * Return: 0 on success else negative error code on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
	u64 byte_64 = 0;
	struct xhci_virt_device *virt_dev;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(ctrl, 1, true);

	byte_64 = virt_dev->out_ctx->dma;

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(byte_64);

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			 sizeof(__le64));
	return 0;
}

/**
 * Allocates the necessary data structures
 * for xHCI host controller
 *
 * @param ctrl	host controller data structure
 * @param hccr	pointer to host controller control registers
 * @param hcor	pointer to host controller operational registers
 * Return: 0 if successful else negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
		  struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	uint64_t deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	ctrl->dcbaa->dma = xhci_dma_map(ctrl, ctrl->dcbaa,
				sizeof(struct xhci_device_context_array));
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, ctrl->dcbaa->dma);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(ctrl, 1, true);

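	/*
	 * Per the xHCI spec, the Command Ring Control register carries the
	 * ring cycle state (RCS) in its low bits and contains reserved
	 * bits that software must preserve on writes.
	 */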
	/* Set the address in the Command Ring Control register */
	trb_64 = ctrl->cmd_ring->first_seg->dma;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* write the address of db register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* write the address of runtime register */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ctrl, ERST_NUM_SEGS, false);
	ctrl->erst.entries = xhci_malloc(sizeof(struct xhci_erst_entry) *
					 ERST_NUM_SEGS);
	ctrl->erst.erst_dma_addr = xhci_dma_map(ctrl, ctrl->erst.entries,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

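	/* Populate one ERST entry per event ring segment */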
	for (val = 0, seg = ctrl->event_ring->first_seg;
	     val < ERST_NUM_SEGS;
	     val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = seg->dma;
		entry->seg_addr = cpu_to_le64(trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = xhci_trb_virt_to_dma(ctrl->event_ring->deq_seg,
				   ctrl->event_ring->dequeue);

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
		    (u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ctrl->erst.erst_dma_addr & ~ERST_PTR_MASK;

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* set up the scratchpad buffer array and scratchpad buffers */
	xhci_scratchpad_alloc(ctrl);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}

/**
 * Gives the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * Return: pointer to the input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Gives the slot context for the passed container context
 *
 * @param ctrl	host controller data structure
 * @param ctx	pointer to the context
 * Return: pointer to the slot context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl	host controller data structure
 * @param ctx	context container
 * @param ep_index	index of the endpoint
 * Return: pointer to the endpoint context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/*
	 * Skip the slot context (and, for input contexts, the input
	 * control context) that precede the endpoint context array.
	 */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams))));
}

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl	host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @param ep_index	index of the endpoint
 * Return: none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl	host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * Return: none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl	host controller data structure
 * @param udev	pointer to the Device Data Structure
 * @param hop_portnr	root hub port number of this device
 * Return: none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
				     struct usb_device *udev, int hop_portnr)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	int slot_id = udev->slot_id;
	int speed = udev->speed;
	int route = 0;
#if CONFIG_IS_ENABLED(DM_USB)
	struct usb_device *dev = udev;
	struct usb_hub_device *hub;
#endif

	virt_dev = ctrl->devs[slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

#if CONFIG_IS_ENABLED(DM_USB)
	/* Calculate the route string for this device */
	port_num = dev->portnr;
	while (!usb_hub_is_root_hub(dev->dev)) {
		hub = dev_get_uclass_priv(dev->dev);
		/*
		 * Each hub in the topology is expected to have no more than
		 * 15 ports in order for the route string of a device to be
		 * unique. SuperSpeed hubs are restricted to only having 15
		 * ports, but FS/LS/HS hubs are not. The xHCI specification
		 * says that if the port number of the device is greater than
		 * 15, that portion of the route string shall be set to 15.
		 */
		if (port_num > 15)
			port_num = 15;
		route |= port_num << (hub->hub_depth * 4);
		dev = dev_get_parent_priv(dev->dev);
		port_num = dev->portnr;
		dev = dev_get_parent_priv(dev->dev->parent);
	}

	debug("route string %x\n", route);
#endif
	slot_ctx->dev_info |= cpu_to_le32(route);

	switch (speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

#if CONFIG_IS_ENABLED(DM_USB)
	/* Set up TT fields to support FS/LS devices */
	if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
		struct udevice *parent = udev->dev;

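		/*
		 * Walk up the tree to the first high-speed hub: that hub's
		 * Transaction Translator handles split transactions for
		 * this FS/LS device, so its slot ID and downstream port
		 * number go into the TT fields of the slot context.
		 */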
		dev = udev;
		do {
			port_num = dev->portnr;
			dev = dev_get_parent_priv(parent);
			if (usb_hub_is_root_hub(dev->dev))
				break;
			parent = dev->dev->parent;
		} while (dev->speed != USB_SPEED_HIGH);

		if (!usb_hub_is_root_hub(dev->dev)) {
			hub = dev_get_uclass_priv(dev->dev);
			if (hub->tt.multi)
				slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
			slot_ctx->tt_info |= cpu_to_le32(TT_PORT(port_num));
			slot_ctx->tt_info |= cpu_to_le32(TT_SLOT(dev->slot_id));
		}
	}
#endif

	port_num = hop_portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
		cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
			ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	debug("SPEED = %d\n", speed);

	switch (speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));

	trb_64 = virt_dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/*
	 * xHCI spec 6.2.3:
	 * software shall set 'Average TRB Length' to 8 for control endpoints.
	 */
	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}