/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is the producer, it rings the doorbell for the
 *    command and endpoint rings. If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */
66 | ||
67 | #include "xhci.h" | |
68 | ||
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long offset;

	if (!seg || !trb || (void *) trb < (void *) seg->trbs)
		return 0;
	/* offset in bytes, since these are byte-addressable */
	offset = (unsigned long) trb - (unsigned long) seg->trbs;
	/* SEGMENT_SIZE in bytes, TRBs are 16-byte aligned */
	if (offset >= SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
		return 0;
	return seg->dma + offset;
}
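
/*
 * Sketch of the inverse mapping, as used by handle_tx_event() below to turn
 * an event's DMA pointer back into a TRB pointer once the segment is known;
 * a hypothetical helper, not called by the driver.
 */
static inline union xhci_trb *trb_dma_to_virt(struct xhci_segment *seg,
		dma_addr_t dma)
{
	return &seg->trbs[(dma - seg->dma) / sizeof(union xhci_trb)];
}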
87 | ||
88 | /* Does this link TRB point to the first segment in a ring, | |
89 | * or was the previous TRB the last TRB on the last segment in the ERST? | |
90 | */ | |
91 | static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, | |
92 | struct xhci_segment *seg, union xhci_trb *trb) | |
93 | { | |
94 | if (ring == xhci->event_ring) | |
95 | return (trb == &seg->trbs[TRBS_PER_SEGMENT]) && | |
96 | (seg->next == xhci->event_ring->first_seg); | |
97 | else | |
98 | return trb->link.control & LINK_TOGGLE; | |
99 | } | |
100 | ||
101 | /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring | |
102 | * segment? I.e. would the updated event TRB pointer step off the end of the | |
103 | * event seg? | |
104 | */ | |
105 | static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | |
106 | struct xhci_segment *seg, union xhci_trb *trb) | |
107 | { | |
108 | if (ring == xhci->event_ring) | |
109 | return trb == &seg->trbs[TRBS_PER_SEGMENT]; | |
110 | else | |
111 | return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); | |
112 | } | |
113 | ||
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
}
139 | ||
140 | /* | |
141 | * See Cycle bit rules. SW is the consumer for the event ring only. | |
142 | * Don't make a ring full of link TRBs. That would be dumb and this would loop. | |
143 | * | |
144 | * If we've just enqueued a TRB that is in the middle of a TD (meaning the | |
145 | * chain bit is set), then set the chain bit in all the following link TRBs. | |
146 | * If we've enqueued the last TRB in a TD, make sure the following link TRBs | |
147 | * have their chain bit cleared (so that each Link TRB is a separate TD). | |
148 | * | |
149 | * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit | |
150 | * set, but other sections talk about dealing with the chain bit set. | |
151 | * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB. | |
152 | */ | |
153 | static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) | |
154 | { | |
155 | u32 chain; | |
156 | union xhci_trb *next; | |
157 | ||
158 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; | |
159 | next = ++(ring->enqueue); | |
160 | ||
161 | ring->enq_updates++; | |
162 | /* Update the dequeue pointer further if that was a link TRB or we're at | |
163 | * the end of an event ring segment (which doesn't have link TRBS) | |
164 | */ | |
165 | while (last_trb(xhci, ring, ring->enq_seg, next)) { | |
166 | if (!consumer) { | |
167 | if (ring != xhci->event_ring) { | |
168 | /* Give this link TRB to the hardware */ | |
169 | if (next->link.control & TRB_CYCLE) | |
170 | next->link.control &= (u32) ~TRB_CYCLE; | |
171 | else | |
172 | next->link.control |= (u32) TRB_CYCLE; | |
173 | next->link.control &= TRB_CHAIN; | |
174 | next->link.control |= chain; | |
175 | } | |
176 | /* Toggle the cycle bit after the last ring segment. */ | |
177 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { | |
178 | ring->cycle_state = (ring->cycle_state ? 0 : 1); | |
179 | if (!in_interrupt()) | |
180 | xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n", | |
181 | (unsigned int) ring, | |
182 | (unsigned int) ring->cycle_state); | |
183 | } | |
184 | } | |
185 | ring->enq_seg = ring->enq_seg->next; | |
186 | ring->enqueue = ring->enq_seg->trbs; | |
187 | next = ring->enqueue; | |
188 | } | |
189 | } | |
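
/*
 * A compact restatement of the link TRB handoff in inc_enq() above, assuming
 * the same field layout; a hypothetical helper, not called by the driver.
 * Toggling the cycle bit passes ownership of the link TRB to the HC; the
 * chain bit is rewritten so a multi-TRB TD stays chained across the segment
 * boundary.
 */
static inline void give_link_trb_to_hc(union xhci_trb *link, u32 chain)
{
	link->link.control ^= TRB_CYCLE;	/* hand the link TRB to the HC */
	link->link.control &= (u32) ~TRB_CHAIN;
	link->link.control |= chain;		/* carry the TD's chain state over */
}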
190 | ||
191 | /* | |
192 | * Check to see if there's room to enqueue num_trbs on the ring. See rules | |
193 | * above. | |
194 | * FIXME: this would be simpler and faster if we just kept track of the number | |
195 | * of free TRBs in a ring. | |
196 | */ | |
197 | static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, | |
198 | unsigned int num_trbs) | |
199 | { | |
200 | int i; | |
201 | union xhci_trb *enq = ring->enqueue; | |
202 | struct xhci_segment *enq_seg = ring->enq_seg; | |
203 | ||
204 | /* Check if ring is empty */ | |
205 | if (enq == ring->dequeue) | |
206 | return 1; | |
207 | /* Make sure there's an extra empty TRB available */ | |
208 | for (i = 0; i <= num_trbs; ++i) { | |
209 | if (enq == ring->dequeue) | |
210 | return 0; | |
211 | enq++; | |
212 | while (last_trb(xhci, ring, enq_seg, enq)) { | |
213 | enq_seg = enq_seg->next; | |
214 | enq = enq_seg->trbs; | |
215 | } | |
216 | } | |
217 | return 1; | |
218 | } | |
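
/*
 * Sketch of the FIXME above: if struct xhci_ring kept a count of free TRBs
 * (a hypothetical "num_trbs_free" field, updated in inc_enq() and inc_deq()),
 * the scan would reduce to one comparison. The "extra empty TRB" keeps
 * enqueue == dequeue meaning "empty", per the ring behavior rules at the top
 * of this file. Not called by the driver.
 */
static inline int room_on_ring_counted(unsigned int num_trbs_free,
		unsigned int num_trbs)
{
	return num_trbs_free > num_trbs;
}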
219 | ||
220 | void set_hc_event_deq(struct xhci_hcd *xhci) | |
221 | { | |
222 | u32 temp; | |
223 | dma_addr_t deq; | |
224 | ||
225 | deq = trb_virt_to_dma(xhci->event_ring->deq_seg, | |
226 | xhci->event_ring->dequeue); | |
227 | if (deq == 0 && !in_interrupt()) | |
228 | xhci_warn(xhci, "WARN something wrong with SW event ring " | |
229 | "dequeue ptr.\n"); | |
230 | /* Update HC event ring dequeue pointer */ | |
231 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | |
232 | temp &= ERST_PTR_MASK; | |
233 | if (!in_interrupt()) | |
234 | xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); | |
235 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | |
236 | xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, | |
237 | &xhci->ir_set->erst_dequeue[0]); | |
238 | } | |
239 | ||
240 | /* Ring the host controller doorbell after placing a command on the ring */ | |
241 | void ring_cmd_db(struct xhci_hcd *xhci) | |
242 | { | |
243 | u32 temp; | |
244 | ||
245 | xhci_dbg(xhci, "// Ding dong!\n"); | |
246 | temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK; | |
247 | xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]); | |
248 | /* Flush PCI posted writes */ | |
249 | xhci_readl(xhci, &xhci->dba->doorbell[0]); | |
250 | } | |
251 | ||
252 | static void handle_cmd_completion(struct xhci_hcd *xhci, | |
253 | struct xhci_event_cmd *event) | |
254 | { | |
3ffbba95 | 255 | int slot_id = TRB_TO_SLOT_ID(event->flags); |
7f84eef0 SS |
256 | u64 cmd_dma; |
257 | dma_addr_t cmd_dequeue_dma; | |
258 | ||
7f84eef0 SS |
259 | cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; |
260 | cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg, | |
261 | xhci->cmd_ring->dequeue); | |
262 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ | |
263 | if (cmd_dequeue_dma == 0) { | |
264 | xhci->error_bitmask |= 1 << 4; | |
265 | return; | |
266 | } | |
267 | /* Does the DMA address match our internal dequeue pointer address? */ | |
268 | if (cmd_dma != (u64) cmd_dequeue_dma) { | |
269 | xhci->error_bitmask |= 1 << 5; | |
270 | return; | |
271 | } | |
272 | switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) { | |
3ffbba95 SS |
273 | case TRB_TYPE(TRB_ENABLE_SLOT): |
274 | if (GET_COMP_CODE(event->status) == COMP_SUCCESS) | |
275 | xhci->slot_id = slot_id; | |
276 | else | |
277 | xhci->slot_id = 0; | |
278 | complete(&xhci->addr_dev); | |
279 | break; | |
280 | case TRB_TYPE(TRB_DISABLE_SLOT): | |
281 | if (xhci->devs[slot_id]) | |
282 | xhci_free_virt_device(xhci, slot_id); | |
283 | break; | |
f94e0186 SS |
284 | case TRB_TYPE(TRB_CONFIG_EP): |
285 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); | |
286 | complete(&xhci->devs[slot_id]->cmd_completion); | |
287 | break; | |
3ffbba95 SS |
288 | case TRB_TYPE(TRB_ADDR_DEV): |
289 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); | |
290 | complete(&xhci->addr_dev); | |
291 | break; | |
7f84eef0 SS |
292 | case TRB_TYPE(TRB_CMD_NOOP): |
293 | ++xhci->noops_handled; | |
294 | break; | |
295 | default: | |
296 | /* Skip over unknown commands on the event ring */ | |
297 | xhci->error_bitmask |= 1 << 6; | |
298 | break; | |
299 | } | |
300 | inc_deq(xhci, xhci->cmd_ring, false); | |
301 | } | |
302 | ||
0f2a7930 SS |
303 | static void handle_port_status(struct xhci_hcd *xhci, |
304 | union xhci_trb *event) | |
305 | { | |
306 | u32 port_id; | |
307 | ||
308 | /* Port status change events always have a successful completion code */ | |
309 | if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { | |
310 | xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); | |
311 | xhci->error_bitmask |= 1 << 8; | |
312 | } | |
313 | /* FIXME: core doesn't care about all port link state changes yet */ | |
314 | port_id = GET_PORT_ID(event->generic.field[0]); | |
315 | xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); | |
316 | ||
317 | /* Update event ring dequeue pointer before dropping the lock */ | |
318 | inc_deq(xhci, xhci->event_ring, true); | |
319 | set_hc_event_deq(xhci); | |
320 | ||
321 | spin_unlock(&xhci->lock); | |
322 | /* Pass this up to the core */ | |
323 | usb_hcd_poll_rh_status(xhci_to_hcd(xhci)); | |
324 | spin_lock(&xhci->lock); | |
325 | } | |
326 | ||
d0e96f5a SS |
327 | /* |
328 | * This TD is defined by the TRBs starting at start_trb in start_seg and ending | |
329 | * at end_trb, which may be in another segment. If the suspect DMA address is a | |
330 | * TRB in this TD, this function returns that TRB's segment. Otherwise it | |
331 | * returns 0. | |
332 | */ | |
333 | static struct xhci_segment *trb_in_td( | |
334 | struct xhci_segment *start_seg, | |
335 | union xhci_trb *start_trb, | |
336 | union xhci_trb *end_trb, | |
337 | dma_addr_t suspect_dma) | |
338 | { | |
339 | dma_addr_t start_dma; | |
340 | dma_addr_t end_seg_dma; | |
341 | dma_addr_t end_trb_dma; | |
342 | struct xhci_segment *cur_seg; | |
343 | ||
344 | start_dma = trb_virt_to_dma(start_seg, start_trb); | |
345 | cur_seg = start_seg; | |
346 | ||
347 | do { | |
348 | /* | |
349 | * Last TRB is a link TRB (unless we start inserting links in | |
350 | * the middle, FIXME if you do) | |
351 | */ | |
352 | end_seg_dma = trb_virt_to_dma(cur_seg, &start_seg->trbs[TRBS_PER_SEGMENT - 2]); | |
353 | /* If the end TRB isn't in this segment, this is set to 0 */ | |
354 | end_trb_dma = trb_virt_to_dma(cur_seg, end_trb); | |
355 | ||
356 | if (end_trb_dma > 0) { | |
357 | /* The end TRB is in this segment, so suspect should be here */ | |
358 | if (start_dma <= end_trb_dma) { | |
359 | if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) | |
360 | return cur_seg; | |
361 | } else { | |
362 | /* Case for one segment with | |
363 | * a TD wrapped around to the top | |
364 | */ | |
365 | if ((suspect_dma >= start_dma && | |
366 | suspect_dma <= end_seg_dma) || | |
367 | (suspect_dma >= cur_seg->dma && | |
368 | suspect_dma <= end_trb_dma)) | |
369 | return cur_seg; | |
370 | } | |
371 | return 0; | |
372 | } else { | |
373 | /* Might still be somewhere in this segment */ | |
374 | if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) | |
375 | return cur_seg; | |
376 | } | |
377 | cur_seg = cur_seg->next; | |
378 | start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); | |
379 | } while (1); | |
380 | ||
381 | } | |
382 | ||
383 | /* | |
384 | * If this function returns an error condition, it means it got a Transfer | |
385 | * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. | |
386 | * At this point, the host controller is probably hosed and should be reset. | |
387 | */ | |
388 | static int handle_tx_event(struct xhci_hcd *xhci, | |
389 | struct xhci_transfer_event *event) | |
390 | { | |
391 | struct xhci_virt_device *xdev; | |
392 | struct xhci_ring *ep_ring; | |
393 | int ep_index; | |
394 | struct xhci_td *td = 0; | |
395 | dma_addr_t event_dma; | |
396 | struct xhci_segment *event_seg; | |
397 | union xhci_trb *event_trb; | |
b10de142 | 398 | struct urb *urb; |
d0e96f5a SS |
399 | int status = -EINPROGRESS; |
400 | ||
401 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; | |
402 | if (!xdev) { | |
403 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); | |
404 | return -ENODEV; | |
405 | } | |
406 | ||
407 | /* Endpoint ID is 1 based, our index is zero based */ | |
408 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | |
409 | ep_ring = xdev->ep_rings[ep_index]; | |
410 | if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | |
411 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); | |
412 | return -ENODEV; | |
413 | } | |
414 | ||
415 | event_dma = event->buffer[0]; | |
416 | if (event->buffer[1] != 0) | |
417 | xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n"); | |
418 | ||
419 | /* This TRB should be in the TD at the head of this ring's TD list */ | |
420 | if (list_empty(&ep_ring->td_list)) { | |
421 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", | |
422 | TRB_TO_SLOT_ID(event->flags), ep_index); | |
423 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", | |
424 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); | |
425 | xhci_print_trb_offsets(xhci, (union xhci_trb *) event); | |
426 | urb = NULL; | |
427 | goto cleanup; | |
428 | } | |
429 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); | |
430 | ||
431 | /* Is this a TRB in the currently executing TD? */ | |
432 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, | |
433 | td->last_trb, event_dma); | |
434 | if (!event_seg) { | |
435 | /* HC is busted, give up! */ | |
436 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); | |
437 | return -ESHUTDOWN; | |
438 | } | |
439 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; | |
b10de142 SS |
440 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", |
441 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); | |
442 | xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", | |
443 | (unsigned int) event->buffer[0]); | |
444 | xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", | |
445 | (unsigned int) event->buffer[1]); | |
446 | xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", | |
447 | (unsigned int) event->transfer_len); | |
448 | xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", | |
449 | (unsigned int) event->flags); | |
450 | ||
451 | /* Look for common error cases */ | |
452 | switch (GET_COMP_CODE(event->transfer_len)) { | |
453 | /* Skip codes that require special handling depending on | |
454 | * transfer type | |
455 | */ | |
456 | case COMP_SUCCESS: | |
457 | case COMP_SHORT_TX: | |
458 | break; | |
459 | case COMP_STALL: | |
460 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); | |
461 | status = -EPIPE; | |
462 | break; | |
463 | case COMP_TRB_ERR: | |
464 | xhci_warn(xhci, "WARN: TRB error on endpoint\n"); | |
465 | status = -EILSEQ; | |
466 | break; | |
467 | case COMP_TX_ERR: | |
468 | xhci_warn(xhci, "WARN: transfer error on endpoint\n"); | |
469 | status = -EPROTO; | |
470 | break; | |
471 | case COMP_DB_ERR: | |
472 | xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); | |
473 | status = -ENOSR; | |
474 | break; | |
475 | default: | |
476 | xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n"); | |
477 | urb = NULL; | |
478 | goto cleanup; | |
479 | } | |
d0e96f5a SS |
480 | /* Now update the urb's actual_length and give back to the core */ |
481 | /* Was this a control transfer? */ | |
482 | if (usb_endpoint_xfer_control(&td->urb->ep->desc)) { | |
483 | xhci_debug_trb(xhci, xhci->event_ring->dequeue); | |
484 | switch (GET_COMP_CODE(event->transfer_len)) { | |
485 | case COMP_SUCCESS: | |
486 | if (event_trb == ep_ring->dequeue) { | |
487 | xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n"); | |
488 | status = -ESHUTDOWN; | |
489 | } else if (event_trb != td->last_trb) { | |
490 | xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n"); | |
491 | status = -ESHUTDOWN; | |
492 | } else { | |
493 | xhci_dbg(xhci, "Successful control transfer!\n"); | |
494 | status = 0; | |
495 | } | |
496 | break; | |
497 | case COMP_SHORT_TX: | |
498 | xhci_warn(xhci, "WARN: short transfer on control ep\n"); | |
499 | status = -EREMOTEIO; | |
500 | break; | |
d0e96f5a | 501 | default: |
b10de142 SS |
502 | /* Others already handled above */ |
503 | break; | |
d0e96f5a SS |
504 | } |
505 | /* | |
506 | * Did we transfer any data, despite the errors that might have | |
507 | * happened? I.e. did we get past the setup stage? | |
508 | */ | |
509 | if (event_trb != ep_ring->dequeue) { | |
510 | /* The event was for the status stage */ | |
511 | if (event_trb == td->last_trb) { | |
512 | td->urb->actual_length = td->urb->transfer_buffer_length; | |
513 | } else { | |
514 | /* The event was for the data stage */ | |
515 | td->urb->actual_length = td->urb->transfer_buffer_length - | |
516 | TRB_LEN(event->transfer_len); | |
517 | } | |
518 | } | |
d0e96f5a | 519 | } else { |
b10de142 SS |
520 | switch (GET_COMP_CODE(event->transfer_len)) { |
521 | case COMP_SUCCESS: | |
522 | /* Double check that the HW transferred everything. */ | |
523 | if (event_trb != td->last_trb) { | |
524 | xhci_warn(xhci, "WARN Successful completion " | |
525 | "on short TX\n"); | |
526 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | |
527 | status = -EREMOTEIO; | |
528 | else | |
529 | status = 0; | |
530 | } else { | |
531 | xhci_dbg(xhci, "Successful bulk transfer!\n"); | |
532 | status = 0; | |
533 | } | |
534 | break; | |
535 | case COMP_SHORT_TX: | |
536 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | |
537 | status = -EREMOTEIO; | |
538 | else | |
539 | status = 0; | |
540 | break; | |
541 | default: | |
542 | /* Others already handled above */ | |
543 | break; | |
544 | } | |
545 | dev_dbg(&td->urb->dev->dev, | |
546 | "ep %#x - asked for %d bytes, " | |
547 | "%d bytes untransferred\n", | |
548 | td->urb->ep->desc.bEndpointAddress, | |
549 | td->urb->transfer_buffer_length, | |
550 | TRB_LEN(event->transfer_len)); | |
551 | /* Fast path - was this the last TRB in the TD for this URB? */ | |
552 | if (event_trb == td->last_trb) { | |
553 | if (TRB_LEN(event->transfer_len) != 0) { | |
554 | td->urb->actual_length = | |
555 | td->urb->transfer_buffer_length - | |
556 | TRB_LEN(event->transfer_len); | |
557 | if (td->urb->actual_length < 0) { | |
558 | xhci_warn(xhci, "HC gave bad length " | |
559 | "of %d bytes left\n", | |
560 | TRB_LEN(event->transfer_len)); | |
561 | td->urb->actual_length = 0; | |
562 | } | |
563 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | |
564 | status = -EREMOTEIO; | |
565 | else | |
566 | status = 0; | |
567 | } else { | |
568 | td->urb->actual_length = td->urb->transfer_buffer_length; | |
569 | /* Ignore a short packet completion if the | |
570 | * untransferred length was zero. | |
571 | */ | |
572 | status = 0; | |
573 | } | |
574 | } else { | |
575 | /* Slow path - walk the list, starting from the first | |
576 | * TRB to get the actual length transferred | |
577 | */ | |
578 | td->urb->actual_length = 0; | |
579 | while (ep_ring->dequeue != event_trb) { | |
580 | td->urb->actual_length += TRB_LEN(ep_ring->dequeue->generic.field[2]); | |
581 | inc_deq(xhci, ep_ring, false); | |
582 | } | |
583 | td->urb->actual_length += TRB_LEN(ep_ring->dequeue->generic.field[2]) - | |
584 | TRB_LEN(event->transfer_len); | |
585 | ||
586 | } | |
d0e96f5a | 587 | } |
b10de142 SS |
588 | /* Update ring dequeue pointer */ |
589 | while (ep_ring->dequeue != td->last_trb) | |
590 | inc_deq(xhci, ep_ring, false); | |
591 | inc_deq(xhci, ep_ring, false); | |
592 | ||
593 | /* Clean up the endpoint's TD list */ | |
594 | urb = td->urb; | |
595 | list_del(&td->td_list); | |
596 | kfree(td); | |
597 | urb->hcpriv = NULL; | |
d0e96f5a SS |
598 | cleanup: |
599 | inc_deq(xhci, xhci->event_ring, true); | |
600 | set_hc_event_deq(xhci); | |
601 | ||
b10de142 | 602 | /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */ |
d0e96f5a SS |
603 | if (urb) { |
604 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); | |
605 | spin_unlock(&xhci->lock); | |
606 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); | |
607 | spin_lock(&xhci->lock); | |
608 | } | |
609 | return 0; | |
610 | } | |
611 | ||
0f2a7930 SS |
612 | /* |
613 | * This function handles all OS-owned events on the event ring. It may drop | |
614 | * xhci->lock between event processing (e.g. to pass up port status changes). | |
615 | */ | |
7f84eef0 SS |
616 | void handle_event(struct xhci_hcd *xhci) |
617 | { | |
618 | union xhci_trb *event; | |
0f2a7930 | 619 | int update_ptrs = 1; |
d0e96f5a | 620 | int ret; |
7f84eef0 SS |
621 | |
622 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { | |
623 | xhci->error_bitmask |= 1 << 1; | |
624 | return; | |
625 | } | |
626 | ||
627 | event = xhci->event_ring->dequeue; | |
628 | /* Does the HC or OS own the TRB? */ | |
629 | if ((event->event_cmd.flags & TRB_CYCLE) != | |
630 | xhci->event_ring->cycle_state) { | |
631 | xhci->error_bitmask |= 1 << 2; | |
632 | return; | |
633 | } | |
634 | ||
0f2a7930 | 635 | /* FIXME: Handle more event types. */ |
7f84eef0 SS |
636 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { |
637 | case TRB_TYPE(TRB_COMPLETION): | |
638 | handle_cmd_completion(xhci, &event->event_cmd); | |
639 | break; | |
0f2a7930 SS |
640 | case TRB_TYPE(TRB_PORT_STATUS): |
641 | handle_port_status(xhci, event); | |
642 | update_ptrs = 0; | |
643 | break; | |
d0e96f5a SS |
644 | case TRB_TYPE(TRB_TRANSFER): |
645 | ret = handle_tx_event(xhci, &event->trans_event); | |
646 | if (ret < 0) | |
647 | xhci->error_bitmask |= 1 << 9; | |
648 | else | |
649 | update_ptrs = 0; | |
650 | break; | |
7f84eef0 SS |
651 | default: |
652 | xhci->error_bitmask |= 1 << 3; | |
653 | } | |
654 | ||
0f2a7930 SS |
655 | if (update_ptrs) { |
656 | /* Update SW and HC event ring dequeue pointer */ | |
657 | inc_deq(xhci, xhci->event_ring, true); | |
658 | set_hc_event_deq(xhci); | |
659 | } | |
7f84eef0 SS |
660 | /* Are there more items on the event ring? */ |
661 | handle_event(xhci); | |
662 | } | |
663 | ||
d0e96f5a SS |
664 | /**** Endpoint Ring Operations ****/ |
665 | ||
7f84eef0 SS |
666 | /* |
667 | * Generic function for queueing a TRB on a ring. | |
668 | * The caller must have checked to make sure there's room on the ring. | |
669 | */ | |
670 | static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | |
671 | bool consumer, | |
672 | u32 field1, u32 field2, u32 field3, u32 field4) | |
673 | { | |
674 | struct xhci_generic_trb *trb; | |
675 | ||
676 | trb = &ring->enqueue->generic; | |
677 | trb->field[0] = field1; | |
678 | trb->field[1] = field2; | |
679 | trb->field[2] = field3; | |
680 | trb->field[3] = field4; | |
681 | inc_enq(xhci, ring, consumer); | |
682 | } | |
683 | ||
/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_HALTED:
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for halt or error on ep "
				"to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}
	return 0;
}
726 | ||
727 | int xhci_prepare_transfer(struct xhci_hcd *xhci, | |
728 | struct xhci_virt_device *xdev, | |
729 | unsigned int ep_index, | |
730 | unsigned int num_trbs, | |
731 | struct urb *urb, | |
732 | struct xhci_td **td, | |
733 | gfp_t mem_flags) | |
734 | { | |
735 | int ret; | |
736 | ||
737 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], | |
738 | xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, | |
739 | num_trbs, mem_flags); | |
740 | if (ret) | |
741 | return ret; | |
742 | *td = kzalloc(sizeof(struct xhci_td), mem_flags); | |
743 | if (!*td) | |
744 | return -ENOMEM; | |
745 | INIT_LIST_HEAD(&(*td)->td_list); | |
746 | ||
747 | ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb); | |
748 | if (unlikely(ret)) { | |
749 | kfree(*td); | |
750 | return ret; | |
751 | } | |
752 | ||
753 | (*td)->urb = urb; | |
754 | urb->hcpriv = (void *) (*td); | |
755 | /* Add this TD to the tail of the endpoint ring's TD list */ | |
756 | list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list); | |
757 | ||
758 | return 0; | |
759 | } | |
760 | ||
b10de142 SS |
761 | /* This is very similar to what ehci-q.c qtd_fill() does */ |
762 | int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |
763 | struct urb *urb, int slot_id, unsigned int ep_index) | |
764 | { | |
765 | struct xhci_ring *ep_ring; | |
766 | struct xhci_td *td; | |
767 | int num_trbs; | |
768 | struct xhci_generic_trb *start_trb; | |
769 | bool first_trb; | |
770 | int start_cycle; | |
771 | u32 field; | |
772 | ||
773 | int running_total, trb_buff_len, ret; | |
774 | u64 addr; | |
775 | ||
776 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | |
777 | ||
778 | num_trbs = 0; | |
779 | /* How much data is (potentially) left before the 64KB boundary? */ | |
780 | running_total = TRB_MAX_BUFF_SIZE - | |
781 | (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | |
782 | ||
783 | /* If there's some data on this 64KB chunk, or we have to send a | |
784 | * zero-length transfer, we need at least one TRB | |
785 | */ | |
786 | if (running_total != 0 || urb->transfer_buffer_length == 0) | |
787 | num_trbs++; | |
788 | /* How many more 64KB chunks to transfer, how many more TRBs? */ | |
789 | while (running_total < urb->transfer_buffer_length) { | |
790 | num_trbs++; | |
791 | running_total += TRB_MAX_BUFF_SIZE; | |
792 | } | |
793 | /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ | |
794 | ||
795 | if (!in_interrupt()) | |
796 | dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, addr = %#x, num_trbs = %d\n", | |
797 | urb->ep->desc.bEndpointAddress, | |
798 | urb->transfer_buffer_length, urb->transfer_dma, | |
799 | num_trbs); | |
800 | ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, | |
801 | num_trbs, urb, &td, mem_flags); | |
802 | if (ret < 0) | |
803 | return ret; | |
804 | ||
805 | /* | |
806 | * Don't give the first TRB to the hardware (by toggling the cycle bit) | |
807 | * until we've finished creating all the other TRBs. The ring's cycle | |
808 | * state may change as we enqueue the other TRBs, so save it too. | |
809 | */ | |
810 | start_trb = &ep_ring->enqueue->generic; | |
811 | start_cycle = ep_ring->cycle_state; | |
812 | ||
813 | running_total = 0; | |
814 | /* How much data is in the first TRB? */ | |
815 | addr = (u64) urb->transfer_dma; | |
816 | trb_buff_len = TRB_MAX_BUFF_SIZE - | |
817 | (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | |
818 | if (urb->transfer_buffer_length < trb_buff_len) | |
819 | trb_buff_len = urb->transfer_buffer_length; | |
820 | ||
821 | first_trb = true; | |
822 | ||
823 | /* Queue the first TRB, even if it's zero-length */ | |
824 | do { | |
825 | field = 0; | |
826 | ||
827 | /* Don't change the cycle bit of the first TRB until later */ | |
828 | if (first_trb) | |
829 | first_trb = false; | |
830 | else | |
831 | field |= ep_ring->cycle_state; | |
832 | ||
833 | /* Chain all the TRBs together; clear the chain bit in the last | |
834 | * TRB to indicate it's the last TRB in the chain. | |
835 | */ | |
836 | if (num_trbs > 1) { | |
837 | field |= TRB_CHAIN; | |
838 | } else { | |
839 | /* FIXME - add check for ZERO_PACKET flag before this */ | |
840 | td->last_trb = ep_ring->enqueue; | |
841 | field |= TRB_IOC; | |
842 | } | |
843 | queue_trb(xhci, ep_ring, false, | |
844 | (u32) addr, | |
845 | (u32) ((u64) addr >> 32), | |
846 | TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), | |
847 | /* We always want to know if the TRB was short, | |
848 | * or we won't get an event when it completes. | |
849 | * (Unless we use event data TRBs, which are a | |
850 | * waste of space and HC resources.) | |
851 | */ | |
852 | field | TRB_ISP | TRB_TYPE(TRB_NORMAL)); | |
853 | --num_trbs; | |
854 | running_total += trb_buff_len; | |
855 | ||
856 | /* Calculate length for next transfer */ | |
857 | addr += trb_buff_len; | |
858 | trb_buff_len = urb->transfer_buffer_length - running_total; | |
859 | if (trb_buff_len > TRB_MAX_BUFF_SIZE) | |
860 | trb_buff_len = TRB_MAX_BUFF_SIZE; | |
861 | } while (running_total < urb->transfer_buffer_length); | |
862 | ||
863 | if (num_trbs != 0) | |
864 | dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " | |
865 | "TRBs, %d left\n", __FUNCTION__, | |
866 | urb->ep->desc.bEndpointAddress, num_trbs); | |
867 | /* | |
868 | * Pass all the TRBs to the hardware at once and make sure this write | |
869 | * isn't reordered. | |
870 | */ | |
871 | wmb(); | |
872 | start_trb->field[3] |= start_cycle; | |
873 | field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK; | |
874 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), &xhci->dba->doorbell[slot_id]); | |
875 | /* Flush PCI posted writes */ | |
876 | xhci_readl(xhci, &xhci->dba->doorbell[slot_id]); | |
877 | ||
878 | return 0; | |
879 | } | |
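
/*
 * Worked example of the TRB count in queue_bulk_tx() above (same arithmetic,
 * extracted into a hypothetical helper the driver does not call): a buffer
 * starting 512 bytes below a 64KB boundary with a 1024-byte transfer needs
 * two TRBs, 512 bytes up to the boundary and 512 bytes after it.
 */
static inline int count_bulk_trbs(u64 dma, unsigned int len)
{
	unsigned int running_total = TRB_MAX_BUFF_SIZE -
		(dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	int num_trbs = 0;

	/* First TRB covers the data before the boundary (or a zero-length TD) */
	if (running_total != 0 || len == 0)
		num_trbs++;
	/* One more TRB per additional 64KB chunk */
	while (running_total < len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}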
880 | ||
d0e96f5a SS |
881 | /* Caller must have locked xhci->lock */ |
882 | int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |
883 | struct urb *urb, int slot_id, unsigned int ep_index) | |
884 | { | |
885 | struct xhci_ring *ep_ring; | |
886 | int num_trbs; | |
887 | int ret; | |
888 | struct usb_ctrlrequest *setup; | |
889 | struct xhci_generic_trb *start_trb; | |
890 | int start_cycle; | |
891 | u32 field; | |
892 | struct xhci_td *td; | |
893 | ||
894 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | |
895 | ||
896 | /* | |
897 | * Need to copy setup packet into setup TRB, so we can't use the setup | |
898 | * DMA address. | |
899 | */ | |
900 | if (!urb->setup_packet) | |
901 | return -EINVAL; | |
902 | ||
903 | if (!in_interrupt()) | |
904 | xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n", | |
905 | slot_id, ep_index); | |
906 | /* 1 TRB for setup, 1 for status */ | |
907 | num_trbs = 2; | |
908 | /* | |
909 | * Don't need to check if we need additional event data and normal TRBs, | |
910 | * since data in control transfers will never get bigger than 16MB | |
911 | * XXX: can we get a buffer that crosses 64KB boundaries? | |
912 | */ | |
913 | if (urb->transfer_buffer_length > 0) | |
914 | num_trbs++; | |
915 | ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs, | |
916 | urb, &td, mem_flags); | |
917 | if (ret < 0) | |
918 | return ret; | |
919 | ||
920 | /* | |
921 | * Don't give the first TRB to the hardware (by toggling the cycle bit) | |
922 | * until we've finished creating all the other TRBs. The ring's cycle | |
923 | * state may change as we enqueue the other TRBs, so save it too. | |
924 | */ | |
925 | start_trb = &ep_ring->enqueue->generic; | |
926 | start_cycle = ep_ring->cycle_state; | |
927 | ||
928 | /* Queue setup TRB - see section 6.4.1.2.1 */ | |
929 | /* FIXME better way to translate setup_packet into two u32 fields? */ | |
930 | setup = (struct usb_ctrlrequest *) urb->setup_packet; | |
931 | queue_trb(xhci, ep_ring, false, | |
932 | /* FIXME endianness is probably going to bite my ass here. */ | |
933 | setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, | |
934 | setup->wIndex | setup->wLength << 16, | |
935 | TRB_LEN(8) | TRB_INTR_TARGET(0), | |
936 | /* Immediate data in pointer */ | |
937 | TRB_IDT | TRB_TYPE(TRB_SETUP)); | |
938 | ||
939 | /* If there's data, queue data TRBs */ | |
940 | field = 0; | |
941 | if (urb->transfer_buffer_length > 0) { | |
942 | if (setup->bRequestType & USB_DIR_IN) | |
943 | field |= TRB_DIR_IN; | |
944 | queue_trb(xhci, ep_ring, false, | |
945 | lower_32_bits(urb->transfer_dma), | |
946 | upper_32_bits(urb->transfer_dma), | |
947 | TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), | |
948 | /* Event on short tx */ | |
949 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); | |
950 | } | |
951 | ||
952 | /* Save the DMA address of the last TRB in the TD */ | |
953 | td->last_trb = ep_ring->enqueue; | |
954 | ||
955 | /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ | |
956 | /* If the device sent data, the status stage is an OUT transfer */ | |
957 | if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) | |
958 | field = 0; | |
959 | else | |
960 | field = TRB_DIR_IN; | |
961 | queue_trb(xhci, ep_ring, false, | |
962 | 0, | |
963 | 0, | |
964 | TRB_INTR_TARGET(0), | |
965 | /* Event on completion */ | |
966 | field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); | |
967 | ||
968 | /* | |
969 | * Pass all the TRBs to the hardware at once and make sure this write | |
970 | * isn't reordered. | |
971 | */ | |
972 | wmb(); | |
973 | start_trb->field[3] |= start_cycle; | |
974 | field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK; | |
975 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), &xhci->dba->doorbell[slot_id]); | |
976 | /* Flush PCI posted writes */ | |
977 | xhci_readl(xhci, &xhci->dba->doorbell[slot_id]); | |
978 | ||
979 | return 0; | |
980 | } | |
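
/*
 * The status stage direction rule used above, restated as a hypothetical
 * helper (not called by the driver): per the spec table referenced in
 * queue_ctrl_tx(), the status stage runs opposite to the data stage, so it
 * is an OUT only when the device sent data IN.
 */
static inline u32 ctrl_status_stage_field(struct urb *urb,
		struct usb_ctrlrequest *setup)
{
	if (urb->transfer_buffer_length > 0 &&
			(setup->bRequestType & USB_DIR_IN))
		return 0;		/* data IN, so status is an OUT */
	return TRB_DIR_IN;		/* no data or data OUT: status is an IN */
}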
981 | ||
982 | /**** Command Ring Operations ****/ | |
983 | ||
7f84eef0 SS |
984 | /* Generic function for queueing a command TRB on the command ring */ |
985 | static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4) | |
986 | { | |
987 | if (!room_on_ring(xhci, xhci->cmd_ring, 1)) { | |
988 | if (!in_interrupt()) | |
989 | xhci_err(xhci, "ERR: No room for command on command ring\n"); | |
990 | return -ENOMEM; | |
991 | } | |
992 | queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, | |
993 | field4 | xhci->cmd_ring->cycle_state); | |
994 | return 0; | |
995 | } | |
996 | ||
997 | /* Queue a no-op command on the command ring */ | |
998 | static int queue_cmd_noop(struct xhci_hcd *xhci) | |
999 | { | |
1000 | return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP)); | |
1001 | } | |
1002 | ||
1003 | /* | |
1004 | * Place a no-op command on the command ring to test the command and | |
1005 | * event ring. | |
1006 | */ | |
1007 | void *setup_one_noop(struct xhci_hcd *xhci) | |
1008 | { | |
1009 | if (queue_cmd_noop(xhci) < 0) | |
1010 | return NULL; | |
1011 | xhci->noops_submitted++; | |
1012 | return ring_cmd_db; | |
1013 | } | |
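
/*
 * Sketch of how a caller might consume setup_one_noop()'s return value (a
 * hypothetical example; the real caller lives elsewhere): queue the no-op
 * under xhci->lock, then ring the doorbell through the returned pointer
 * once it's ready to hand the TRB to the HC.
 */
static inline void submit_one_noop(struct xhci_hcd *xhci)
{
	/* setup_one_noop() returns ring_cmd_db as a void pointer */
	void (*doorbell)(struct xhci_hcd *) = setup_one_noop(xhci);

	if (doorbell)
		doorbell(xhci);
}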

/* Queue a slot enable or disable request on the command ring */
int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue an address device command TRB */
int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue a configure endpoint command TRB */
int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
}