/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}

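/*
 * Usage note (an illustrative sketch, not part of this driver): a USB
 * device driver that would rather not have its control/bulk URBs polled
 * at FSBR speed can opt out per URB before submission:
 *
 *	urb->transfer_flags |= URB_NO_FSBR;
 *	usb_submit_urb(urb, GFP_KERNEL);
 *
 * uhci_add_fsbr() above then leaves urbp->fsbr clear, so the queue never
 * requests Full-Speed Bandwidth Reclamation.
 */
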
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list)) {
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
		WARN_ON(1);
	}
	if (!list_empty(&td->fl_list)) {
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
		WARN_ON(1);
	}

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

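/*
 * For reference, callers compose the three TD words in CPU byte order
 * and uhci_fill_td() stores them little-endian for the controller.  A
 * typical call, taken from the control path further down, fills the
 * SETUP TD as:
 *
 *	uhci_fill_td(td, uhci_maxerr(3),
 *			destination | uhci_explen(8), urb->setup_dma);
 */
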
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(td);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td,
					fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue)) {
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
		WARN_ON(1);
	}

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

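/*
 * A note on the qh->load computation above: usb_calc_bus_time() returns
 * an estimated transaction time in nanoseconds, so the "/ 1000 + 1"
 * stores a per-frame load in microseconds, rounded up.  These are the
 * units used by uhci_check_bandwidth() below, where the periodic budget
 * is 900 us out of each 1 ms frame.
 */
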
/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

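/*
 * A worked example of the fixup above (illustrative values): suppose a
 * bulk queue held two URBs of 3 packets each and the first URB ended
 * short after its second packet, whose toggle was 1.  qh->initial_toggle
 * is then saved as 0 (the toggle the hardware expects next), while the
 * second URB's TDs were built as 1,0,1 and no longer match.  The
 * list_for_each_entry() branch flips each TD's toggle bit to give
 * 0,1,0, and usb_settoggle() records 1 as the next free toggle.
 */
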
/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

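/*
 * The ordering in link_interrupt() and link_async() is deliberate: the
 * new QH's own link pointer is made valid first, and only then does the
 * store to the predecessor's link expose the QH to the controller, with
 * wmb() keeping the two stores from being reordered.  The same
 * publish-then-expose pattern appears in uhci_insert_td_in_frame_list()
 * above.
 */
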
/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the
	 * appropriate skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

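/*
 * To summarize the QH lifecycle implemented above:
 *
 *	IDLE --- uhci_activate_qh() ---> ACTIVE
 *	ACTIVE --- uhci_unlink_qh() ---> UNLINKING
 *	UNLINKING --- (at least one frame later) ---> IDLE
 *
 * The one-frame wait before uhci_make_qh_idle() runs guarantees that
 * the controller can no longer be following the QH's links by the time
 * the QH is reused or freed.
 */
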
/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}

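/*
 * A worked example of the minimax search above (illustrative numbers,
 * assuming MAX_PHASE is 32): for period = 4, each candidate phase p in
 * 0..3 covers frames p, p+4, p+8, ...  uhci_highest_load() reports the
 * worst-case load among those frames, and the loop keeps the phase
 * whose worst case is smallest.  The new queue fits only if that value
 * plus qh->load stays within the 900 us budget.
 */
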
/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node)) {
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);
		WARN_ON(1);
	}

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)		/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {	/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)		/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)		/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)		/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

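/*
 * For example, an IN control transfer of 18 bytes with maxsze = 8 comes
 * out of the code above as the chain
 *
 *	SETUP(8, DATA0) -> IN(8, DATA1) -> IN(8, DATA0) -> IN(2, DATA1)
 *		-> OUT(0, DATA1, IOC) -> dummy
 *
 * The data stage alternates toggles starting with DATA1, SPD is cleared
 * on the last data packet, and the status stage reverses direction and
 * always uses DATA1.
 */
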
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

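/*
 * The dummy-TD handoff above is what makes submission safe while the
 * controller is running: the new URB's first TD is written into the
 * queue's existing (inactive) dummy, a fresh inactive dummy is linked
 * on at the tail, and only then does the wmb()-ordered store set
 * TD_CTRL_ACTIVE on the old dummy.  The controller therefore never
 * sees a half-built chain.
 */
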
static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;
		qh->period = 1 << exponent;
		qh->skel = SKEL_INDEX(exponent);

		/* For now, interrupt phase is fixed by the layout
		 * of the QH lists. */
		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
		ret = uhci_check_bandwidth(uhci, qh);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}

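/*
 * The exponent search rounds the requested interval down to a power of
 * two, since each interrupt skeleton queue runs at a power-of-two
 * period.  For example, urb->interval = 25 gives exponent 4: period 16
 * frames, phase 8.  The effective interval is reported back to the
 * submitter through urb->interval = qh->period after a successful
 * submit.
 */
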
/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(td);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

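/*
 * qh->post_td, managed in the loop above, keeps the most recently
 * completed TD alive after its URB bookkeeping is done.  It serves two
 * purposes: uhci_fixup_short_transfer() reads its token to recover the
 * last toggle value, and a restarted queue can be re-linked at the
 * td->link of the TD just processed.  It is finally freed in
 * uhci_make_qh_idle().
 */
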
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		if (urb->transfer_flags & URB_ISO_ASAP) {
			qh->phase = -1;		/* Find the best phase */
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;

			/* Allow a little time to allocate the TDs */
			uhci_get_current_frame_number(uhci);
			frame = uhci->frame_number + 10;

			/* Move forward to the first frame having the
			 * correct phase */
			urb->start_frame = frame + ((qh->phase - frame) &
					(qh->period - 1));
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
			qh->phase = urb->start_frame & (qh->period - 1);
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;
		}

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {	/* Pick up where the last URB leaves off */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP)
			urb->start_frame = frame;
		else if (urb->start_frame != frame)
			return -EINVAL;
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
	}

	qh->skel = SKEL_ISO;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}

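/*
 * Example of the URB_ISO_ASAP start computation above (illustrative
 * numbers): with period 8, phase 3, and current frame 1000, the first
 * TD is placed at frame 1010 + ((3 - 1010) & 7) = 1011 -- the first
 * frame at least 10 frames ahead whose number is congruent to the
 * chosen phase modulo the period.
 */
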
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

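/*
 * Error unwinding in uhci_urb_enqueue() mirrors the setup order: a
 * failed type-specific submit releases the QH only if it was freshly
 * allocated (still IDLE), then the urb_priv, and finally the
 * usb_hcd_link_urb_to_ep() registration -- all under uhci->lock.
 */
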
e9df41c5 | 1447 | static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
0ed8fee1 AS |
1448 | { |
1449 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | |
1450 | unsigned long flags; | |
10b8e47d | 1451 | struct uhci_qh *qh; |
e9df41c5 | 1452 | int rc; |
0ed8fee1 AS |
1453 | |
1454 | spin_lock_irqsave(&uhci->lock, flags); | |
e9df41c5 AS |
1455 | rc = usb_hcd_check_unlink_urb(hcd, urb, status); |
1456 | if (rc) | |
0ed8fee1 | 1457 | goto done; |
e9df41c5 AS |
1458 | |
1459 | qh = ((struct urb_priv *) urb->hcpriv)->qh; | |
0ed8fee1 AS |
1460 | |
1461 | /* Remove Isochronous TDs from the frame list ASAP */ | |
10b8e47d | 1462 | if (qh->type == USB_ENDPOINT_XFER_ISOC) { |
0ed8fee1 | 1463 | uhci_unlink_isochronous_tds(uhci, urb); |
10b8e47d AS |
1464 | mb(); |
1465 | ||
1466 | /* If the URB has already started, update the QH unlink time */ | |
1467 | uhci_get_current_frame_number(uhci); | |
1468 | if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number)) | |
1469 | qh->unlink_frame = uhci->frame_number; | |
1470 | } | |
1471 | ||
1472 | uhci_unlink_qh(uhci, qh); | |
0ed8fee1 AS |
1473 | |
1474 | done: | |
1475 | spin_unlock_irqrestore(&uhci->lock, flags); | |
e9df41c5 | 1476 | return rc; |
0ed8fee1 AS |
1477 | } |
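uhci_frame_before_eq() above has to order two frame numbers even though the UHCI frame counter wraps around (the hardware counter is only 11 bits wide). The usual trick, the same one behind the kernel's time_after() family, is to subtract in unsigned arithmetic and test the sign of the difference. A self-contained sketch; the helper name and the 16-bit width are illustrative assumptions, not the driver's exact definition:

#include <assert.h>
#include <stdint.h>

/* Nonzero if frame f1 comes at or before f2.  Valid as long as the two
 * frames are less than half the counter range apart: the unsigned
 * subtraction wraps cleanly, and the signed reinterpretation of the
 * result recovers the ordering. */
static int frame_before_eq(uint16_t f1, uint16_t f2)
{
	return (int16_t)(f2 - f1) >= 0;
}

int main(void)
{
	assert(frame_before_eq(10, 12));	/* plain case */
	assert(!frame_before_eq(12, 10));
	assert(frame_before_eq(0xfffe, 3));	/* f2 wrapped past zero */
	return 0;
}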
1478 | ||
1da177e4 | 1479 | /* |
0ed8fee1 | 1480 | * Finish unlinking an URB and give it back |
1da177e4 | 1481 | */ |
0ed8fee1 | 1482 | static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh, |
7d12e780 | 1483 | struct urb *urb) |
0ed8fee1 AS |
1484 | __releases(uhci->lock) |
1485 | __acquires(uhci->lock) | |
1da177e4 | 1486 | { |
dccf4a48 | 1487 | struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; |
1da177e4 | 1488 | |
e7e7c360 AS |
1489 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) { |
1490 | ||
1491 | /* urb->actual_length < 0 means the setup transaction didn't | |
1492 | * complete successfully. Either it failed or the URB was | |
1493 | * unlinked first. Regardless, don't confuse people with a | |
1494 | * negative length. */ | |
1495 | urb->actual_length = max(urb->actual_length, 0); | |
e7e7c360 AS |
1496 | } |
1497 | ||
c8155cc5 AS |
1498 | /* When giving back the first URB in an Isochronous queue, |
1499 | * reinitialize the QH's iso-related members for the next URB. */ | |
e7e7c360 | 1500 | else if (qh->type == USB_ENDPOINT_XFER_ISOC && |
c8155cc5 AS |
1501 | urbp->node.prev == &qh->queue && |
1502 | urbp->node.next != &qh->queue) { | |
1503 | struct urb *nurb = list_entry(urbp->node.next, | |
1504 | struct urb_priv, node)->urb; | |
1505 | ||
1506 | qh->iso_packet_desc = &nurb->iso_frame_desc[0]; | |
1507 | qh->iso_frame = nurb->start_frame; | |
c8155cc5 | 1508 | } |
1da177e4 | 1509 | |
0ed8fee1 AS |
1510 | /* Take the URB off the QH's queue. If the queue is now empty, |
1511 | * this is a perfect time for a toggle fixup. */ | |
1512 | list_del_init(&urbp->node); | |
1513 | if (list_empty(&qh->queue) && qh->needs_fixup) { | |
1514 | usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), | |
1515 | usb_pipeout(urb->pipe), qh->initial_toggle); | |
1516 | qh->needs_fixup = 0; | |
1517 | } | |
1518 | ||
0ed8fee1 | 1519 | uhci_free_urb_priv(uhci, urbp); |
e9df41c5 | 1520 | usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb); |
1da177e4 | 1521 | |
0ed8fee1 | 1522 | spin_unlock(&uhci->lock); |
7d12e780 | 1523 | usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb); |
0ed8fee1 | 1524 | spin_lock(&uhci->lock); |
1da177e4 | 1525 | |
0ed8fee1 AS |
1526 | /* If the queue is now empty, we can unlink the QH and give up its |
1527 | * reserved bandwidth. */ | |
1528 | if (list_empty(&qh->queue)) { | |
1529 | uhci_unlink_qh(uhci, qh); | |
3ca2a321 AS |
1530 | if (qh->bandwidth_reserved) |
1531 | uhci_release_bandwidth(uhci, qh); | |
0ed8fee1 | 1532 | } |
dccf4a48 | 1533 | } |
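The __releases/__acquires annotations on uhci_giveback_urb() tell sparse's lock-balance checker what the body actually does: the caller holds uhci->lock on entry, the lock is dropped around usb_hcd_giveback_urb() so the URB's completion handler can resubmit URBs and re-enter the driver without deadlocking, and the lock is re-taken before returning. A condensed kernel-style sketch of the pattern with hypothetical names (the real completion handler is invoked through the USB core, not called directly):

#include <linux/spinlock.h>

struct item {
	void (*complete)(struct item *it);	/* may re-enter the driver */
};

struct ctx {
	spinlock_t lock;
};

/* Caller holds c->lock.  Drop it only around the callback, exactly as
 * the annotations promise, then take it back before returning. */
static void give_back_sketch(struct ctx *c, struct item *it)
	__releases(c->lock)
	__acquires(c->lock)
{
	spin_unlock(&c->lock);
	it->complete(it);		/* safe: may take c->lock itself */
	spin_lock(&c->lock);
}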
1da177e4 | 1534 | |
dccf4a48 | 1535 | /* |
0ed8fee1 | 1536 | * Scan the URBs in a QH's queue |
dccf4a48 | 1537 | */ |
0ed8fee1 AS |
1538 | #define QH_FINISHED_UNLINKING(qh) \ |
1539 | (qh->state == QH_STATE_UNLINKING && \ | |
1540 | uhci->frame_number + uhci->is_stopped != qh->unlink_frame) | |
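/* Reading the macro above: the controller may still be fetching a QH
 * during the frame in which it was unlinked, so the unlink only counts
 * as finished once the frame counter has moved past qh->unlink_frame.
 * The "+ uhci->is_stopped" term covers a halted controller: is_stopped
 * is 0 while the HC is running, but when the HC is stopped it is set to
 * a value larger than any frame number, which forces the inequality
 * true.  With the schedule not being fetched at all, the unlink is
 * complete immediately. */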
1da177e4 | 1541 | |
7d12e780 | 1542 | static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) |
1da177e4 | 1543 | { |
1da177e4 | 1544 | struct urb_priv *urbp; |
0ed8fee1 AS |
1545 | struct urb *urb; |
1546 | int status; | |
1da177e4 | 1547 | |
0ed8fee1 AS |
1548 | while (!list_empty(&qh->queue)) { |
1549 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | |
1550 | urb = urbp->urb; | |
1da177e4 | 1551 | |
b1869000 | 1552 | if (qh->type == USB_ENDPOINT_XFER_ISOC) |
0ed8fee1 | 1553 | status = uhci_result_isochronous(uhci, urb); |
b1869000 | 1554 | else |
0ed8fee1 | 1555 | status = uhci_result_common(uhci, urb); |
0ed8fee1 AS |
1556 | if (status == -EINPROGRESS) |
1557 | break; | |
1da177e4 | 1558 | |
0ed8fee1 | 1559 | spin_lock(&urb->lock); |
eb231054 | 1560 | urb->status = status; |
0ed8fee1 | 1561 | spin_unlock(&urb->lock); |
1da177e4 | 1562 | |
0ed8fee1 AS |
1563 | /* Dequeued but completed URBs can't be given back unless |
1564 | * the QH is stopped or has finished unlinking. */ | |
eb231054 | 1565 | if (urb->unlinked) { |
2775562a AS |
1566 | if (QH_FINISHED_UNLINKING(qh)) |
1567 | qh->is_stopped = 1; | |
1568 | else if (!qh->is_stopped) | |
1569 | return; | |
1570 | } | |
1da177e4 | 1571 | |
7d12e780 | 1572 | uhci_giveback_urb(uhci, qh, urb); |
ee7d1f3f | 1573 | if (status < 0) |
0ed8fee1 AS |
1574 | break; |
1575 | } | |
1da177e4 | 1576 | |
0ed8fee1 AS |
1577 | /* If the QH is neither stopped nor finished unlinking (normal case), |
1578 | * our work here is done. */ | |
2775562a AS |
1579 | if (QH_FINISHED_UNLINKING(qh)) |
1580 | qh->is_stopped = 1; | |
1581 | else if (!qh->is_stopped) | |
0ed8fee1 | 1582 | return; |
1da177e4 | 1583 | |
0ed8fee1 | 1584 | /* Otherwise give back each of the dequeued URBs */ |
2775562a | 1585 | restart: |
0ed8fee1 AS |
1586 | list_for_each_entry(urbp, &qh->queue, node) { |
1587 | urb = urbp->urb; | |
eb231054 | 1588 | if (urb->unlinked) { |
10b8e47d AS |
1589 | |
1590 | /* Fix up the TD links and save the toggles for | |
1591 | * non-Isochronous queues. For Isochronous queues, | |
1592 | * test for too-recent dequeues. */ | |
1593 | if (!uhci_cleanup_queue(uhci, qh, urb)) { | |
1594 | qh->is_stopped = 0; | |
1595 | return; | |
1596 | } | |
7d12e780 | 1597 | uhci_giveback_urb(uhci, qh, urb); |
0ed8fee1 AS |
1598 | goto restart; |
1599 | } | |
1600 | } | |
1601 | qh->is_stopped = 0; | |
1da177e4 | 1602 | |
0ed8fee1 AS |
1603 | /* There are no more dequeued URBs. If there are still URBs on the |
1604 | * queue, the QH can now be re-activated. */ | |
1605 | if (!list_empty(&qh->queue)) { | |
1606 | if (qh->needs_fixup) | |
1607 | uhci_fixup_toggles(qh, 0); | |
84afddd7 AS |
1608 | |
1609 | /* If the first URB on the queue wants FSBR but its time | |
1610 | * limit has expired, set the next TD to interrupt on | |
1611 | * completion before reactivating the QH. */ | |
1612 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | |
1613 | if (urbp->fsbr && qh->wait_expired) { | |
1614 | struct uhci_td *td = list_entry(urbp->td_list.next, | |
1615 | struct uhci_td, list); | |
1616 | ||
1617 | td->status |= __cpu_to_le32(TD_CTRL_IOC); | |
1618 | } | |
1619 | ||
0ed8fee1 | 1620 | uhci_activate_qh(uhci, qh); |
1da177e4 LT |
1621 | } |
1622 | ||
0ed8fee1 AS |
1623 | /* The queue is empty. The QH can become idle if it is fully |
1624 | * unlinked. */ | |
1625 | else if (QH_FINISHED_UNLINKING(qh)) | |
1626 | uhci_make_qh_idle(uhci, qh); | |
1da177e4 LT |
1627 | } |
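The restart: loop above is the standard way to mutate a list while walking it when the per-entry work can invalidate the iterator: uhci_giveback_urb() removes the URB from the queue and, worse, drops uhci->lock around the completion handler, so anything might have happened to the queue by the time it returns. Scanning therefore resumes from the head after every giveback. A small user-space sketch of the idiom (hypothetical types):

#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
	bool doomed;
};

/* remove() unlinks n and may let the list change arbitrarily, so every
 * pointer we held into the list is suspect afterwards; restart from the
 * head instead of continuing from a stale iterator. */
static void scan_sketch(struct node **head,
			void (*remove)(struct node **head, struct node *n))
{
restart:
	for (struct node *n = *head; n != NULL; n = n->next) {
		if (n->doomed) {
			remove(head, n);
			goto restart;
		}
	}
}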
1628 | ||
84afddd7 AS |
1629 | /* |
1630 | * Check for queues that have made some forward progress. | |
1631 | * Returns 0 if the queue is not Isochronous, is ACTIVE, and | |
1632 | * has not advanced since last examined; 1 otherwise. | |
b761d9d8 AS |
1633 | * |
1634 | * Early Intel controllers have a bug that sometimes prevents qh->element |
1635 | * from advancing when a TD completes successfully. The queue remains |
1636 | * stuck on the inactive completed TD. We detect such cases and advance | |
1637 | * the element pointer by hand. | |
84afddd7 AS |
1638 | */ |
1639 | static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) | |
1640 | { | |
1641 | struct urb_priv *urbp = NULL; | |
1642 | struct uhci_td *td; | |
1643 | int ret = 1; | |
1644 | unsigned status; | |
1645 | ||
1646 | if (qh->type == USB_ENDPOINT_XFER_ISOC) | |
c5e3b741 | 1647 | goto done; |
84afddd7 AS |
1648 | |
1649 | /* Treat an UNLINKING queue as though it hasn't advanced. | |
1650 | * This is okay because reactivation will treat it as though | |
1651 | * it has advanced, and if it is going to become IDLE then | |
1652 | * this doesn't matter anyway. Furthermore it's possible | |
1653 | * for an UNLINKING queue not to have any URBs at all, or | |
1654 | * for its first URB not to have any TDs (if it was dequeued | |
1655 | * just as it completed). So it's not easy in any case to | |
1656 | * test whether such queues have advanced. */ | |
1657 | if (qh->state != QH_STATE_ACTIVE) { | |
1658 | urbp = NULL; | |
1659 | status = 0; | |
1660 | ||
1661 | } else { | |
1662 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | |
1663 | td = list_entry(urbp->td_list.next, struct uhci_td, list); | |
1664 | status = td_status(td); | |
1665 | if (!(status & TD_CTRL_ACTIVE)) { | |
1666 | ||
1667 | /* We're okay, the queue has advanced */ | |
1668 | qh->wait_expired = 0; | |
1669 | qh->advance_jiffies = jiffies; | |
c5e3b741 | 1670 | goto done; |
84afddd7 AS |
1671 | } |
1672 | ret = 0; | |
1673 | } | |
1674 | ||
1675 | /* The queue hasn't advanced; check for timeout */ | |
c5e3b741 AS |
1676 | if (qh->wait_expired) |
1677 | goto done; | |
1678 | ||
1679 | if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { | |
b761d9d8 AS |
1680 | |
1681 | /* Detect the Intel bug and work around it */ | |
28b9325e | 1682 | if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) { |
b761d9d8 AS |
1683 | qh->element = qh->post_td->link; |
1684 | qh->advance_jiffies = jiffies; | |
c5e3b741 AS |
1685 | ret = 1; |
1686 | goto done; | |
b761d9d8 AS |
1687 | } |
1688 | ||
84afddd7 AS |
1689 | qh->wait_expired = 1; |
1690 | ||
1691 | /* If the current URB wants FSBR, unlink it temporarily | |
1692 | * so that we can safely set the next TD to interrupt on | |
1693 | * completion. That way we'll know as soon as the queue | |
1694 | * starts moving again. */ | |
1695 | if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC)) | |
1696 | uhci_unlink_qh(uhci, qh); | |
c5e3b741 AS |
1697 | |
1698 | } else { | |
1699 | /* Unmoving but not-yet-expired queues keep FSBR alive */ | |
1700 | if (urbp) | |
1701 | uhci_urbp_wants_fsbr(uhci, urbp); | |
84afddd7 | 1702 | } |
c5e3b741 AS |
1703 | |
1704 | done: | |
84afddd7 AS |
1705 | return ret; |
1706 | } | |
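The stall detection in uhci_advance_check() is a plain jiffies watchdog: advance_jiffies is refreshed whenever forward progress is seen, and once time_after() (itself wraparound-safe, like the frame comparison earlier) reports that the queue has sat still for QH_WAIT_TIMEOUT, the queue is marked expired and the interrupt-on-completion fallback takes over. A minimal sketch of the idiom; the structure, helper name, and timeout value here are illustrative, not the driver's:

#include <linux/jiffies.h>

#define SKETCH_TIMEOUT	msecs_to_jiffies(200)	/* illustrative value */

struct watched {
	unsigned long advance_jiffies;	/* last time progress was seen */
	unsigned int wait_expired:1;
};

/* Returns nonzero once the object has made no progress for
 * SKETCH_TIMEOUT; any progress rearms the watchdog. */
static int check_stalled_sketch(struct watched *w, int progressed)
{
	if (progressed) {
		w->advance_jiffies = jiffies;
		w->wait_expired = 0;
	} else if (!w->wait_expired &&
			time_after(jiffies,
				w->advance_jiffies + SKETCH_TIMEOUT)) {
		w->wait_expired = 1;
	}
	return w->wait_expired;
}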
1707 | ||
0ed8fee1 AS |
1708 | /* |
1709 | * Process events in the schedule, but only in one thread at a time | |
1710 | */ | |
7d12e780 | 1711 | static void uhci_scan_schedule(struct uhci_hcd *uhci) |
1da177e4 | 1712 | { |
0ed8fee1 AS |
1713 | int i; |
1714 | struct uhci_qh *qh; | |
1da177e4 LT |
1715 | |
1716 | /* Don't allow re-entrant calls */ | |
1717 | if (uhci->scan_in_progress) { | |
1718 | uhci->need_rescan = 1; | |
1719 | return; | |
1720 | } | |
1721 | uhci->scan_in_progress = 1; | |
84afddd7 | 1722 | rescan: |
1da177e4 | 1723 | uhci->need_rescan = 0; |
c5e3b741 | 1724 | uhci->fsbr_is_wanted = 0; |
1da177e4 | 1725 | |
6c1b445c | 1726 | uhci_clear_next_interrupt(uhci); |
1da177e4 | 1727 | uhci_get_current_frame_number(uhci); |
c8155cc5 | 1728 | uhci->cur_iso_frame = uhci->frame_number; |
1da177e4 | 1729 | |
0ed8fee1 AS |
1730 | /* Go through all the QH queues and process the URBs in each one */ |
1731 | for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) { | |
1732 | uhci->next_qh = list_entry(uhci->skelqh[i]->node.next, | |
1733 | struct uhci_qh, node); | |
1734 | while ((qh = uhci->next_qh) != uhci->skelqh[i]) { | |
1735 | uhci->next_qh = list_entry(qh->node.next, | |
1736 | struct uhci_qh, node); | |
84afddd7 AS |
1737 | |
1738 | if (uhci_advance_check(uhci, qh)) { | |
7d12e780 | 1739 | uhci_scan_qh(uhci, qh); |
c5e3b741 AS |
1740 | if (qh->state == QH_STATE_ACTIVE) { |
1741 | uhci_urbp_wants_fsbr(uhci, | |
1742 | list_entry(qh->queue.next, struct urb_priv, node)); | |
1743 | } | |
84afddd7 | 1744 | } |
0ed8fee1 | 1745 | } |
1da177e4 | 1746 | } |
1da177e4 | 1747 | |
c8155cc5 | 1748 | uhci->last_iso_frame = uhci->cur_iso_frame; |
1da177e4 LT |
1749 | if (uhci->need_rescan) |
1750 | goto rescan; | |
1751 | uhci->scan_in_progress = 0; | |
1752 | ||
c5e3b741 AS |
1753 | if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted && |
1754 | !uhci->fsbr_expiring) { | |
1755 | uhci->fsbr_expiring = 1; | |
1756 | mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY); | |
1757 | } | |
84afddd7 | 1758 | |
04538a25 | 1759 | if (list_empty(&uhci->skel_unlink_qh->node)) |
1da177e4 LT |
1760 | uhci_clear_next_interrupt(uhci); |
1761 | else | |
1762 | uhci_set_next_interrupt(uhci); | |
1da177e4 | 1763 | } |
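The scan_in_progress/need_rescan pair bracketing uhci_scan_schedule() is a compact "single scanner, no lost wakeups" idiom: a caller that arrives while a scan is already running only records the request and leaves, and the running scan keeps looping until no new request came in while it worked. A condensed sketch with hypothetical names; like the driver, it assumes all callers hold the same lock, so the flags need no atomics:

struct scanner {
	unsigned int scan_in_progress:1;
	unsigned int need_rescan:1;
};

/* Call with the owner's lock held.  do_scan() may drop and retake that
 * lock (this driver does, inside uhci_giveback_urb()), which is exactly
 * how a second invocation can arrive mid-scan and why it must not be
 * lost. */
static void scan_once_sketch(struct scanner *s, void (*do_scan)(void))
{
	if (s->scan_in_progress) {
		s->need_rescan = 1;
		return;
	}
	s->scan_in_progress = 1;
	do {
		s->need_rescan = 0;
		do_scan();
	} while (s->need_rescan);
	s->scan_in_progress = 0;
}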