/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */

#include <linux/scatterlist.h>
#include "xhci.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long offset;

	if (!seg || !trb || (void *) trb < (void *) seg->trbs)
		return 0;
	/* offset in bytes, since these are byte-addressable */
	offset = (unsigned long) trb - (unsigned long) seg->trbs;
	/* SEGMENT_SIZE in bytes, TRBs are 16-byte aligned */
	if (offset >= SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
		return 0;
	return seg->dma + offset;
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
						(unsigned int) ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.
 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	u32 chain;
	union xhci_trb *next;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/* Give this link TRB to the hardware */
				if (next->link.control & TRB_CYCLE)
					next->link.control &= (u32) ~TRB_CYCLE;
				else
					next->link.control |= (u32) TRB_CYCLE;
				/* Carry the chain bit over from the previous TRB */
				next->link.control &= (u32) ~TRB_CHAIN;
				next->link.control |= chain;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
							(unsigned int) ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;

	/* Check if ring is empty */
	if (enq == ring->dequeue)
		return 1;
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}

void set_hc_event_deq(struct xhci_hcd *xhci)
{
	u32 temp;
	dma_addr_t deq;

	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	if (!in_interrupt())
		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue[0]);
}

/* Ring the host controller doorbell after placing a command on the ring */
void ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

static void ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 */
	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
		 * isn't time-critical and we shouldn't make the CPU wait for
		 * the flush.
		 */
		xhci_readl(xhci, db_addr);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return 0;
	}
	return cur_seg;
}

struct dequeue_state {
	struct xhci_segment *new_deq_seg;
	union xhci_trb *new_deq_ptr;
	int new_cycle_state;
};

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
static void find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_td *cur_td, struct dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
	struct xhci_generic_trb *trb;

	state->new_cycle_state = 0;
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			ep_ring->stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];

	state->new_deq_ptr = cur_td->last_trb;
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
			(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}

void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = 0x%x (0x%x dma); "
					"in seg 0x%x (0x%x dma)\n",
					(unsigned int) cur_trb,
					trb_virt_to_dma(cur_seg, cur_trb),
					(unsigned int) cur_seg,
					cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB 0x%x (0x%x dma) "
					"in seg 0x%x (0x%x dma)\n",
					(unsigned int) cur_trb,
					trb_virt_to_dma(cur_seg, cur_trb),
					(unsigned int) cur_seg,
					cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct list_head *entry;
	struct xhci_td *cur_td = 0;
	struct xhci_td *last_unlinked_td;

	struct dequeue_state deq_state;
#ifdef CONFIG_USB_HCD_STAT
	ktime_t stop_time = ktime_get();
#endif

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	if (list_empty(&ep_ring->cancelled_td_list))
		return;

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep_ring->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at 0x%x, 0x%x (dma).\n",
				(unsigned int) cur_td->first_trb,
				trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep_ring->stopped_td)
			find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
					&deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
		ep_ring->cancels_pending--;
	}
	last_unlinked_td = cur_td;

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = 0x%x (0x%x dma), "
				"new deq ptr = 0x%x (0x%x dma), new cycle = %u\n",
				(unsigned int) deq_state.new_deq_seg,
				deq_state.new_deq_seg->dma,
				(unsigned int) deq_state.new_deq_ptr,
				trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
				deq_state.new_cycle_state);
		queue_set_tr_deq(xhci, slot_id, ep_index,
				deq_state.new_deq_seg,
				deq_state.new_deq_ptr,
				(u32) deq_state.new_cycle_state);
		/* Stop the TD queueing code from ringing the doorbell until
		 * this command completes.  The HC won't set the dequeue pointer
		 * if the ring is running, and ringing the doorbell starts the
		 * ring running.
		 */
		ep_ring->state |= SET_DEQ_PENDING;
		ring_cmd_db(xhci);
	} else {
		/* Otherwise just ring the doorbell to restart the ring */
		ring_ep_doorbell(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep_ring->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
#ifdef CONFIG_USB_HCD_STAT
		hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
				ktime_sub(stop_time, cur_td->start_time));
#endif
		cur_td->urb->hcpriv = NULL;
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);

		xhci_dbg(xhci, "Giveback cancelled URB 0x%x\n",
				(unsigned int) cur_td->urb);
		spin_unlock(&xhci->lock);
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
		kfree(cur_td);

		spin_lock(&xhci->lock);
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	dev = xhci->devs[slot_id];
	ep_ring = dev->ep_rings[ep_index];

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = dev->out_ctx->ep[ep_index].ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = dev->out_ctx->slot.dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
				"deq[1] = 0x%x.\n",
				dev->out_ctx->ep[ep_index].deq[0],
				dev->out_ctx->ep[ep_index].deq[1]);
	}

	ep_ring->state &= ~SET_DEQ_PENDING;
	ring_ep_doorbell(xhci, slot_id, ep_index);
}


static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;

	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
static struct xhci_segment *trb_in_td(
		struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return 0;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (1);

}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	int ep_index;
	struct xhci_td *td = 0;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = 0;
	int status = -EINPROGRESS;

	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(event->flags) - 1;
	ep_ring = xdev->ep_rings[ep_index];
	if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
		return -ENODEV;
	}

	event_dma = event->buffer[0];
	if (event->buffer[1] != 0)
		xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");

	/* This TRB should be in the TD at the head of this ring's TD list */
	if (list_empty(&ep_ring->td_list)) {
		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
				TRB_TO_SLOT_ID(event->flags), ep_index);
		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				(unsigned int) (event->flags & TRB_TYPE_BITMASK) >> 10);
		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
		urb = NULL;
		goto cleanup;
	}
	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);

	/* Is this a TRB in the currently executing TD? */
	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
			td->last_trb, event_dma);
	if (!event_seg) {
		/* HC is busted, give up! */
		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
		return -ESHUTDOWN;
	}
	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
			(unsigned int) (event->flags & TRB_TYPE_BITMASK) >> 10);
	xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
			(unsigned int) event->buffer[0]);
	xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
			(unsigned int) event->buffer[1]);
	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
			(unsigned int) event->transfer_len);
	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
			(unsigned int) event->flags);

	/* Look for common error cases */
	switch (GET_COMP_CODE(event->transfer_len)) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_warn(xhci, "WARN: Stalled endpoint\n");
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	default:
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
		urb = NULL;
		goto cleanup;
	}
	/* Now update the urb's actual_length and give back to the core */
	/* Was this a control transfer? */
	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
		switch (GET_COMP_CODE(event->transfer_len)) {
		case COMP_SUCCESS:
			if (event_trb == ep_ring->dequeue) {
				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else {
				xhci_dbg(xhci, "Successful control transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			xhci_warn(xhci, "WARN: short transfer on control ep\n");
			status = -EREMOTEIO;
			break;
		default:
			/* Others already handled above */
			break;
		}
		/*
		 * Did we transfer any data, despite the errors that might have
		 * happened?  I.e. did we get past the setup stage?
		 */
		if (event_trb != ep_ring->dequeue) {
			/* The event was for the status stage */
			if (event_trb == td->last_trb) {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			} else {
				/* Maybe the event was for the data stage? */
				if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
					/* We didn't stop on a link TRB in the middle */
					td->urb->actual_length =
						td->urb->transfer_buffer_length -
						TRB_LEN(event->transfer_len);
			}
		}
	} else {
		switch (GET_COMP_CODE(event->transfer_len)) {
		case COMP_SUCCESS:
			/* Double check that the HW transferred everything. */
			if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN Successful completion "
						"on short TX\n");
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					status = -EREMOTEIO;
				else
					status = 0;
			} else {
				xhci_dbg(xhci, "Successful bulk transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;
		default:
			/* Others already handled above */
			break;
		}
		dev_dbg(&td->urb->dev->dev,
				"ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(event->transfer_len));
		/* Fast path - was this the last TRB in the TD for this URB? */
		if (event_trb == td->last_trb) {
			if (TRB_LEN(event->transfer_len) != 0) {
				td->urb->actual_length =
					td->urb->transfer_buffer_length -
					TRB_LEN(event->transfer_len);
				if ((int) td->urb->actual_length < 0) {
					xhci_warn(xhci, "HC gave bad length "
							"of %d bytes left\n",
							TRB_LEN(event->transfer_len));
					td->urb->actual_length = 0;
				}
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					status = -EREMOTEIO;
				else
					status = 0;
			} else {
				td->urb->actual_length = td->urb->transfer_buffer_length;
				/* Ignore a short packet completion if the
				 * untransferred length was zero.
				 */
				status = 0;
			}
		} else {
			/* Slow path - walk the list, starting from the dequeue
			 * pointer, to get the actual length transferred.
			 */
			union xhci_trb *cur_trb;
			struct xhci_segment *cur_seg;

			td->urb->actual_length = 0;
			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
					cur_trb != event_trb;
					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
				if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) !=
						TRB_TYPE(TRB_TR_NOOP) &&
						(cur_trb->generic.field[3] & TRB_TYPE_BITMASK) !=
						TRB_TYPE(TRB_LINK))
					td->urb->actual_length +=
						TRB_LEN(cur_trb->generic.field[2]);
			}
			/* If the ring didn't stop on a Link or No-op TRB, add
			 * in the actual bytes transferred from the Normal TRB
			 */
			if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
				td->urb->actual_length +=
					TRB_LEN(cur_trb->generic.field[2]) -
					TRB_LEN(event->transfer_len);
		}
	}
	/* The Endpoint Stop Command completion will take care of
	 * any stopped TDs.  A stopped TD may be restarted, so don't update the
	 * ring dequeue pointer or take this TD off any lists yet.
	 */
	if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
			GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
		ep_ring->stopped_td = td;
		ep_ring->stopped_trb = event_trb;
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring, false);
		inc_deq(xhci, ep_ring, false);

		/* Clean up the endpoint's TD list */
		urb = td->urb;
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list)) {
			list_del(&td->cancelled_td_list);
			ep_ring->cancels_pending--;
		}
		kfree(td);
		urb->hcpriv = NULL;
	}
cleanup:
	inc_deq(xhci, xhci->event_ring, true);
	set_hc_event_deq(xhci);

	/* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
	if (urb) {
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
		spin_lock(&xhci->lock);
	}
	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 */
void xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((event->event_cmd.flags & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return;
	}

	/* FIXME: Handle more event types. */
	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	default:
		xhci->error_bitmask |= 1 << 3;
	}

	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);
		set_hc_event_deq(xhci);
	}
	/* Are there more items on the event ring? */
	xhci_handle_event(xhci);
}

/**** Endpoint Ring Operations ****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = field1;
	trb->field[1] = field2;
	trb->field[2] = field3;
	trb->field[3] = field4;
	inc_enq(xhci, ring, consumer);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_HALTED:
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for halt or error on ep "
				"to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}
	return 0;
}

int xhci_prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int num_trbs,
		struct urb *urb,
		struct xhci_td **td,
		gfp_t mem_flags)
{
	int ret;

	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
			xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;
	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
	if (!*td)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*td)->td_list);
	INIT_LIST_HEAD(&(*td)->cancelled_td_list);

	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
	if (unlikely(ret)) {
		kfree(*td);
		return ret;
	}

	(*td)->urb = urb;
	urb->hcpriv = (void *) (*td);
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
	(*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
	(*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;

	return 0;
}

unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_sgs;
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs:\n");
	num_trbs = 0;
	for_each_sg(urb->sg->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg)) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#x, len = %#x (%d), num_trbs = %d\n",
				i, sg_dma_address(sg), len, len,
				num_trbs - previous_total_trbs);

		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}

void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int start_cycle,
		struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	start_trb->field[3] |= start_cycle;
	ring_ep_doorbell(xhci, slot_id, ep_index);
}

int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	bool first_trb;
	u64 addr;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_sgs;

	trb_buff_len = xhci_prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, num_trbs, urb, &td, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;
	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRB buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;
	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
			trb_buff_len);

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
				"64KB boundary at %#x, end dma = %#x\n",
				(unsigned int) addr, trb_buff_len, trb_buff_len,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		if (TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}
		queue_trb(xhci, ep_ring, false,
				(u32) addr,
				(u32) ((u64) addr >> 32),
				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}

b10de142 SS |
1359 | /* This is very similar to what ehci-q.c qtd_fill() does */ |
1360 | int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |
1361 | struct urb *urb, int slot_id, unsigned int ep_index) | |
1362 | { | |
1363 | struct xhci_ring *ep_ring; | |
1364 | struct xhci_td *td; | |
1365 | int num_trbs; | |
1366 | struct xhci_generic_trb *start_trb; | |
1367 | bool first_trb; | |
1368 | int start_cycle; | |
1369 | u32 field; | |
1370 | ||
1371 | int running_total, trb_buff_len, ret; | |
1372 | u64 addr; | |
1373 | ||
8a96c052 SS |
1374 | if (urb->sg) |
1375 | return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); | |
1376 | ||
b10de142 SS |
1377 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; |
1378 | ||
        num_trbs = 0;
        /* How much data is (potentially) left before the 64KB boundary? */
        running_total = TRB_MAX_BUFF_SIZE -
                (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

        /* If there's some data on this 64KB chunk, or we have to send a
         * zero-length transfer, we need at least one TRB.
         */
        if (running_total != 0 || urb->transfer_buffer_length == 0)
                num_trbs++;
        /* How many more 64KB chunks to transfer, how many more TRBs? */
        while (running_total < urb->transfer_buffer_length) {
                num_trbs++;
                running_total += TRB_MAX_BUFF_SIZE;
        }
        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
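
        /* Worked example with hypothetical values: transfer_dma = 0x1F800 and
         * transfer_buffer_length = 0x10000 (64KB).  running_total starts at
         * 0x10000 - 0xF800 = 0x800, which is non-zero, so num_trbs becomes 1;
         * the loop then adds one more 64KB chunk before running_total reaches
         * the URB length, so the transfer needs 2 TRBs: one of 0x800 bytes
         * and one of 0xF800 bytes.
         */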

        if (!in_interrupt())
                dev_dbg(&urb->dev->dev,
                                "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
                                urb->transfer_buffer_length,
                                (unsigned long long) urb->transfer_dma,
                                num_trbs);

        ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
                        num_trbs, urb, &td, mem_flags);
        if (ret < 0)
                return ret;

        /*
         * Don't give the first TRB to the hardware (by toggling the cycle bit)
         * until we've finished creating all the other TRBs. The ring's cycle
         * state may change as we enqueue the other TRBs, so save it too.
         */
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;

        running_total = 0;
        /* How much data is in the first TRB? */
        addr = (u64) urb->transfer_dma;
        trb_buff_len = TRB_MAX_BUFF_SIZE -
                (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
        if (urb->transfer_buffer_length < trb_buff_len)
                trb_buff_len = urb->transfer_buffer_length;
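
        /* With the hypothetical values above (dma = 0x1F800, len = 0x10000),
         * the first TRB covers 0x10000 - 0xF800 = 0x800 bytes; a short URB
         * that fits below the boundary (say len = 0x200) would be clamped
         * to 0x200 by the check above.
         */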

        first_trb = true;

        /* Queue the first TRB, even if it's zero-length */
        do {
                field = 0;

                /* Don't change the cycle bit of the first TRB until later */
                if (first_trb)
                        first_trb = false;
                else
                        field |= ep_ring->cycle_state;

                /* Chain all the TRBs together; clear the chain bit in the last
                 * TRB to indicate it's the last TRB in the chain.
                 */
                if (num_trbs > 1) {
                        field |= TRB_CHAIN;
                } else {
                        /* FIXME - add check for ZERO_PACKET flag before this */
                        td->last_trb = ep_ring->enqueue;
                        field |= TRB_IOC;
                }
                queue_trb(xhci, ep_ring, false,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
                                /* We always want to know if the TRB was short,
                                 * or we won't get an event when it completes.
                                 * (Unless we use event data TRBs, which are a
                                 * waste of space and HC resources.)
                                 */
                                field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
                --num_trbs;
                running_total += trb_buff_len;

                /* Calculate length for next transfer */
                addr += trb_buff_len;
                trb_buff_len = urb->transfer_buffer_length - running_total;
                if (trb_buff_len > TRB_MAX_BUFF_SIZE)
                        trb_buff_len = TRB_MAX_BUFF_SIZE;
        } while (running_total < urb->transfer_buffer_length);

        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
        return 0;
}

/* Caller must have locked xhci->lock */
int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
{
        struct xhci_ring *ep_ring;
        int num_trbs;
        int ret;
        struct usb_ctrlrequest *setup;
        struct xhci_generic_trb *start_trb;
        int start_cycle;
        u32 field;
        struct xhci_td *td;

        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

        /*
         * Need to copy setup packet into setup TRB, so we can't use the setup
         * DMA address.
         */
        if (!urb->setup_packet)
                return -EINVAL;

        if (!in_interrupt())
                xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
                                slot_id, ep_index);
        /* 1 TRB for setup, 1 for status */
        num_trbs = 2;
        /*
         * No need to check whether we need additional event data and normal
         * TRBs, since data in control transfers will never get bigger than
         * 16MB.
         * XXX: can we get a buffer that crosses 64KB boundaries?
         */
        if (urb->transfer_buffer_length > 0)
                num_trbs++;
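
        /* For example, a hypothetical GET_DESCRIPTOR request with wLength = 18
         * has a data stage, so it takes 3 TRBs: setup, data, and status; a
         * SET_ADDRESS request has no data stage and takes only 2.
         */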
        ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
                        num_trbs, urb, &td, mem_flags);
        if (ret < 0)
                return ret;

        /*
         * Don't give the first TRB to the hardware (by toggling the cycle bit)
         * until we've finished creating all the other TRBs. The ring's cycle
         * state may change as we enqueue the other TRBs, so save it too.
         */
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;

        /* Queue setup TRB - see section 6.4.1.2.1 */
        /* FIXME better way to translate setup_packet into two u32 fields? */
        setup = (struct usb_ctrlrequest *) urb->setup_packet;
        queue_trb(xhci, ep_ring, false,
                        /* FIXME endianness is probably going to bite my ass here. */
                        setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
                        setup->wIndex | setup->wLength << 16,
                        TRB_LEN(8) | TRB_INTR_TARGET(0),
                        /* Immediate data in pointer */
                        TRB_IDT | TRB_TYPE(TRB_SETUP));
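
        /* Packing example, assuming a little-endian CPU (see the FIXME above):
         * for GET_DESCRIPTOR(DEVICE) -- bRequestType = 0x80, bRequest = 0x06,
         * wValue = 0x0100, wIndex = 0, wLength = 18 -- the two fields are
         * 0x80 | 0x06 << 8 | 0x0100 << 16 = 0x01000680 and
         * 0 | 18 << 16 = 0x00120000.
         */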

        /* If there's data, queue data TRBs */
        field = 0;
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
                        field |= TRB_DIR_IN;
                queue_trb(xhci, ep_ring, false,
                                lower_32_bits(urb->transfer_dma),
                                upper_32_bits(urb->transfer_dma),
                                TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
                                /* Event on short tx */
                                field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
        }

        /* Save the DMA address of the last TRB in the TD */
        td->last_trb = ep_ring->enqueue;

        /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
        /* If the device sent data, the status stage is an OUT transfer */
        if (urb->transfer_buffer_length > 0 &&
                        (setup->bRequestType & USB_DIR_IN))
                field = 0;
        else
                field = TRB_DIR_IN;
        queue_trb(xhci, ep_ring, false,
                        0,
                        0,
                        TRB_INTR_TARGET(0),
                        /* Event on completion */
                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
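
        /* The status stage always runs opposite to the data stage: an IN data
         * stage gets an OUT status TRB (field = 0), while an OUT data stage
         * or a request with no data stage gets an IN status TRB (TRB_DIR_IN).
         */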

        giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
        return 0;
}

/**** Command Ring Operations ****/

/* Generic function for queueing a command TRB on the command ring */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                u32 field3, u32 field4)
{
        if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
                if (!in_interrupt())
                        xhci_err(xhci, "ERR: No room for command on command ring\n");
                return -ENOMEM;
        }
        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
                        field4 | xhci->cmd_ring->cycle_state);
        return 0;
}

/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
        return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
}

/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 */
void *setup_one_noop(struct xhci_hcd *xhci)
{
        if (queue_cmd_noop(xhci) < 0)
                return NULL;
        xhci->noops_submitted++;
        return ring_cmd_db;
}

/* Queue a slot enable or disable request on the command ring */
int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
        return queue_command(xhci, 0, 0, 0,
                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue an address device command TRB */
int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id)
{
        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue a configure endpoint command TRB */
int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id)
{
        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue a stop endpoint command TRB */
int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index)
{
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_STOP_RING);

        return queue_command(xhci, 0, 0, 0,
                        trb_slot_id | trb_ep_index | type);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, struct xhci_segment *deq_seg,
                union xhci_trb *deq_ptr, u32 cycle_state)
{
        dma_addr_t addr;
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_SET_DEQ);

        addr = trb_virt_to_dma(deq_seg, deq_ptr);
        if (addr == 0) {
                /* Don't queue the command with a bogus dequeue pointer */
                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                                deq_seg, deq_ptr);
                return -EINVAL;
        }
        return queue_command(xhci, lower_32_bits(addr) | cycle_state,
                        upper_32_bits(addr), 0,
                        trb_slot_id | trb_ep_index | type);
}