/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If
 *    the link TRB had its toggle bit set, you may need to update the ring
 *    cycle state (see cycle bit rules).  You may have to do this multiple
 *    times until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle
 *    bit in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer.  If SW is the producer, it rings the doorbell for
 *    command and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you.  If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer (see the sketch after the
 *    #include below).
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include "xhci.h"
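
/*
 * Illustrative sketch of consumer rule 1 above: a TRB is owned by the
 * consumer when its cycle bit matches the ring's cycle state.  This helper
 * is for exposition only (handle_event() below open-codes the same check);
 * it is not part of the driver proper.
 */
static inline bool __maybe_unused trb_owned_by_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	/* TRB_CYCLE is bit 0, so both sides are 0 or 1 */
	return (trb->event_cmd.flags & TRB_CYCLE) == ring->cycle_state;
}
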
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long offset;

	if (!seg || !trb || (void *) trb < (void *) seg->trbs)
		return 0;
	/* offset in bytes; use unsigned long so pointers aren't truncated
	 * on 64-bit
	 */
	offset = (unsigned long) trb - (unsigned long) seg->trbs;
	/* SEGMENT_SIZE is in bytes; TRBs are 16-byte aligned */
	if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
		return 0;
	return seg->dma + offset;
}
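
/*
 * Usage sketch (illustrative only, not driver code): compare the 64-bit DMA
 * address the HC reports in an event TRB against our software dequeue
 * pointer, as handle_cmd_completion() below does for the command ring.
 */
static bool __maybe_unused deq_matches_dma(struct xhci_ring *ring, u64 dma)
{
	return dma == (u64) trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
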
/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would
 * loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would
 * loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.
 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	u32 chain;
	union xhci_trb *next;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/* Give this link TRB to the hardware */
				if (next->link.control & TRB_CYCLE)
					next->link.control &= (u32) ~TRB_CYCLE;
				else
					next->link.control |= (u32) TRB_CYCLE;
				/* Clear the old chain bit, then propagate the
				 * current chain state into the link TRB
				 */
				next->link.control &= (u32) ~TRB_CHAIN;
				next->link.control |= chain;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring (see the sketch below).
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;

	/* Check if ring is empty */
	if (enq == ring->dequeue)
		return 1;
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
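
/*
 * Sketch of the bookkeeping the FIXME above suggests, assuming a hypothetical
 * num_trbs_free field in struct xhci_ring (no such field exists here).  It
 * would be initialized when the ring is allocated, decremented in inc_enq(),
 * and incremented in inc_deq(), making the room check O(1).
 */
#if 0	/* illustrative only */
static int room_on_ring_counted(struct xhci_ring *ring, unsigned int num_trbs)
{
	/* Keep one TRB free so enqueue == dequeue still means "empty" */
	return ring->num_trbs_free > num_trbs;
}
#endif
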
void set_hc_event_deq(struct xhci_hcd *xhci)
{
	u32 temp;
	dma_addr_t deq;

	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	if (!in_interrupt())
		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue[0]);
}

/* Ring the host controller doorbell after placing a command on the ring */
void ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;

	/* Check completion code */
	if (GET_COMP_CODE(event->status) != COMP_SUCCESS)
		xhci_dbg(xhci, "WARN: unsuccessful no-op command\n");

	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 */
void handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((event->event_cmd.flags & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return;
	}

	/* FIXME: Handle more event types. */
	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	default:
		xhci->error_bitmask |= 1 << 3;
	}

	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);
		set_hc_event_deq(xhci);
	}
	/* Are there more items on the event ring?  Recurse until we reach a
	 * TRB we don't own (the ownership check above terminates the chain).
	 */
	handle_event(xhci);
}
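
/*
 * Sketch of how an interrupt handler might drive handle_event().  This is a
 * hedged example, not this file's code: a real handler would also read and
 * acknowledge the interrupt status registers before processing events.
 */
#if 0	/* illustrative only */
static irqreturn_t example_xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	/* handle_event() may temporarily drop xhci->lock (see above) */
	handle_event(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return IRQ_HANDLED;
}
#endif
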
/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = field1;
	trb->field[1] = field2;
	trb->field[2] = field3;
	trb->field[3] = field4;
	inc_enq(xhci, ring, consumer);
}

/* Generic function for queueing a command TRB on the command ring */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4)
{
	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
		if (!in_interrupt())
			xhci_err(xhci, "ERR: No room for command on command ring\n");
		return -ENOMEM;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
}
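
/*
 * Other commands would be queued the same way.  A hedged sketch: assuming
 * xhci.h defines TRB_ENABLE_SLOT (the Enable Slot command type from the xHCI
 * spec, section 6.4.3), the command carries no other payload fields.
 */
#if 0	/* illustrative only */
static int queue_cmd_enable_slot(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_ENABLE_SLOT));
}
#endif
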
/*
 * Place a no-op command on the command ring to test the command and event
 * ring.  Returns a pointer to ring_cmd_db() so the caller can ring the
 * doorbell once it is safe to do so (see the usage sketch below).
 */
void *setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
	return ring_cmd_db;
}
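
/*
 * Sketch of the intended calling pattern for setup_one_noop(): queue the
 * no-op with the lock held, then ring the doorbell through the returned
 * function pointer once the lock is released.  Hedged example, not this
 * file's code.
 */
#if 0	/* illustrative only */
static void example_submit_noop(struct xhci_hcd *xhci)
{
	void (*doorbell)(struct xhci_hcd *) = NULL;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	doorbell = setup_one_noop(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (doorbell)
		(*doorbell)(xhci);
}
#endif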