/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"


/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			DBG(3, "Host TX FIFONOTEMPTY csr: %04x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
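		/* ep0: this write sends the SETUP packet that starts
		 * a control transfer
		 */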
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

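/*
 * With a shared FIFO the endpoint serves only one direction at a time,
 * so both queue head pointers track the same qh; otherwise each
 * direction keeps its own.
 */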
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
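	/* log completions at a verbosity keyed to how unexpected the
	 * status is: success is quiet, common faults louder, the rest
	 * loudest
	 */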
	DBG(({ int level; switch (status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			is_in = usb_pipein(urb->pipe);
	int			ready = qh->is_ready;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

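			/* else: fall through */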
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}

/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);

	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}

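/*
 * Program TX DMA for one chunk of a URB.  With Mentor (Inventra) DMA,
 * mode 1 is used for multi-packet transfers and mode 0 otherwise; CPPI
 * and TUSB OMAP DMA only need the zero-length-final-packet hint.
 * Returns false if the channel couldn't be programmed, in which case
 * the caller falls back to PIO.
 */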
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAMODE
			| MUSB_TXCSR_DMAENAB;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
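			/* bulk splitting packs the per-fifo packet count
			 * minus one into TXMAXP bits 15..11
			 */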
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma + offset,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE: this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			DBG(2, "DMA complete but packet still in FIFO, "
			       "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if (usb_pipeisoc(pipe) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length))
			return;
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		DBG(1, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
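	/* "val" is a scratch copy of CSR used to build later writebacks */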
1451 | val = rx_csr; | |
1452 | ||
1453 | if (unlikely(!urb)) { | |
1454 | /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least | |
1455 | * usbtest #11 (unlinks) triggers it regularly, sometimes | |
1456 | * with fifo full. (Only with DMA??) | |
1457 | */ | |
1458 | DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, | |
1459 | musb_readw(epio, MUSB_RXCOUNT)); | |
1460 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | |
1461 | return; | |
1462 | } | |
1463 | ||
1464 | pipe = urb->pipe; | |
1465 | ||
1466 | DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", | |
1467 | epnum, rx_csr, urb->actual_length, | |
1468 | dma ? dma->actual_len : 0); | |
1469 | ||
1470 | /* check for errors, concurrent stall & unlink is not really | |
1471 | * handled yet! */ | |
1472 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { | |
1473 | DBG(3, "RX end %d STALL\n", epnum); | |
1474 | ||
1475 | /* stall; record URB status */ | |
1476 | status = -EPIPE; | |
1477 | ||
1478 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { | |
1479 | DBG(3, "end %d RX proto error\n", epnum); | |
1480 | ||
1481 | status = -EPROTO; | |
1482 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | |
1483 | ||
1484 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | |
1485 | ||
1486 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | |
1e0320f0 AKG |
1487 | DBG(6, "RX end %d NAK timeout\n", epnum); |
1488 | ||
1489 | /* NOTE: NAKing is *NOT* an error, so we want to | |
1490 | * continue. Except ... if there's a request for | |
1491 | * another QH, use that instead of starving it. | |
550a7375 | 1492 | * |
1e0320f0 AKG |
1493 | * Devices like Ethernet and serial adapters keep |
1494 | * reads posted at all times, which will starve | |
1495 | * other devices without this logic. | |
550a7375 | 1496 | */ |
1e0320f0 AKG |
1497 | if (usb_pipebulk(urb->pipe) |
1498 | && qh->mux == 1 | |
1499 | && !list_is_singular(&musb->in_bulk)) { | |
1500 | musb_bulk_rx_nak_timeout(musb, hw_ep); | |
1501 | return; | |
1502 | } | |
550a7375 | 1503 | musb_ep_select(mbase, epnum); |
1e0320f0 AKG |
1504 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; |
1505 | rx_csr &= ~MUSB_RXCSR_DATAERROR; | |
1506 | musb_writew(epio, MUSB_RXCSR, rx_csr); | |
550a7375 FB |
1507 | |
1508 | goto finish; | |
1509 | } else { | |
1510 | DBG(4, "RX end %d ISO data error\n", epnum); | |
1511 | /* packet error reported later */ | |
1512 | iso_err = true; | |
1513 | } | |
1514 | } | |
1515 | ||
1516 | /* faults abort the transfer */ | |
1517 | if (status) { | |
1518 | /* clean up dma and collect transfer count */ | |
1519 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | |
1520 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | |
1521 | (void) musb->dma_controller->channel_abort(dma); | |
1522 | xfer_len = dma->actual_len; | |
1523 | } | |
1524 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | |
1525 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | |
1526 | done = true; | |
1527 | goto finish; | |
1528 | } | |
1529 | ||
1530 | if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { | |
1531 | /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ | |
1532 | ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); | |
1533 | goto finish; | |
1534 | } | |
1535 | ||
1536 | /* thorough shutdown for now ... given more precise fault handling | |
1537 | * and better queueing support, we might keep a DMA pipeline going | |
1538 | * while processing this irq for earlier completions. | |
1539 | */ | |
1540 | ||
1541 | /* FIXME this is _way_ too much in-line logic for Mentor DMA */ | |
1542 | ||
1543 | #ifndef CONFIG_USB_INVENTRA_DMA | |
1544 | if (rx_csr & MUSB_RXCSR_H_REQPKT) { | |
1545 | /* REVISIT this happened for a while on some short reads... | |
1546 | * the cleanup still needs investigation... looks bad... | |
1547 | * and also duplicates dma cleanup code above ... plus, | |
1548 | * shouldn't this be the "half full" double buffer case? | |
1549 | */ | |
1550 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | |
1551 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | |
1552 | (void) musb->dma_controller->channel_abort(dma); | |
1553 | xfer_len = dma->actual_len; | |
1554 | done = true; | |
1555 | } | |
1556 | ||
1557 | DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, | |
1558 | xfer_len, dma ? ", dma" : ""); | |
1559 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | |
1560 | ||
1561 | musb_ep_select(mbase, epnum); | |
1562 | musb_writew(epio, MUSB_RXCSR, | |
1563 | MUSB_RXCSR_H_WZC_BITS | rx_csr); | |
1564 | } | |
1565 | #endif | |
1566 | if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { | |
1567 | xfer_len = dma->actual_len; | |
1568 | ||
1569 | val &= ~(MUSB_RXCSR_DMAENAB | |
1570 | | MUSB_RXCSR_H_AUTOREQ | |
1571 | | MUSB_RXCSR_AUTOCLEAR | |
1572 | | MUSB_RXCSR_RXPKTRDY); | |
1573 | musb_writew(epio, MUSB_RXCSR, val); | |
1574 | ||
1575 | #ifdef CONFIG_USB_INVENTRA_DMA | |
f82a689f AKG |
1576 | if (usb_pipeisoc(pipe)) { |
1577 | struct usb_iso_packet_descriptor *d; | |
1578 | ||
1579 | d = urb->iso_frame_desc + qh->iso_idx; | |
1580 | d->actual_length = xfer_len; | |
1581 | ||
1582 | /* even if there was an error, we did the dma | |
1583 | * for iso_frame_desc->length | |
1584 | */ | |
1585 | if (d->status != -EILSEQ && d->status != -EOVERFLOW) | |
1586 | d->status = 0; | |
1587 | ||
1588 | if (++qh->iso_idx >= urb->number_of_packets) | |
1589 | done = true; | |
1590 | else | |
1591 | done = false; | |
1592 | ||
1593 | } else { | |
550a7375 FB |
1594 | /* done if urb buffer is full or short packet is recd */ |
1595 | done = (urb->actual_length + xfer_len >= | |
1596 | urb->transfer_buffer_length | |
1597 | || dma->actual_len < qh->maxpacket); | |
f82a689f | 1598 | } |
550a7375 FB |
1599 | |
1600 | /* send IN token for next packet, without AUTOREQ */ | |
1601 | if (!done) { | |
1602 | val |= MUSB_RXCSR_H_REQPKT; | |
1603 | musb_writew(epio, MUSB_RXCSR, | |
1604 | MUSB_RXCSR_H_WZC_BITS | val); | |
1605 | } | |
1606 | ||
1607 | DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, | |
1608 | done ? "off" : "reset", | |
1609 | musb_readw(epio, MUSB_RXCSR), | |
1610 | musb_readw(epio, MUSB_RXCOUNT)); | |
1611 | #else | |
1612 | done = true; | |
1613 | #endif | |
1614 | } else if (urb->status == -EINPROGRESS) { | |
1615 | /* if no errors, be sure a packet is ready for unloading */ | |
1616 | if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { | |
1617 | status = -EPROTO; | |
1618 | ERR("Rx interrupt with no errors or packet!\n"); | |
1619 | ||
1620 | /* FIXME this is another "SHOULD NEVER HAPPEN" */ | |
1621 | ||
1622 | /* SCRUB (RX) */ | |
1623 | /* do the proper sequence to abort the transfer */ | |
1624 | musb_ep_select(mbase, epnum); | |
1625 | val &= ~MUSB_RXCSR_H_REQPKT; | |
1626 | musb_writew(epio, MUSB_RXCSR, val); | |
1627 | goto finish; | |
1628 | } | |
1629 | ||
1630 | /* we are expecting IN packets */ | |
1631 | #ifdef CONFIG_USB_INVENTRA_DMA | |
1632 | if (dma) { | |
1633 | struct dma_controller *c; | |
1634 | u16 rx_count; | |
f82a689f AKG |
1635 | int ret, length; |
1636 | dma_addr_t buf; | |
550a7375 FB |
1637 | |
1638 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | |
1639 | ||
1640 | DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", | |
1641 | epnum, rx_count, | |
1642 | urb->transfer_dma | |
1643 | + urb->actual_length, | |
1644 | qh->offset, | |
1645 | urb->transfer_buffer_length); | |
1646 | ||
1647 | c = musb->dma_controller; | |
1648 | ||
f82a689f AKG |
1649 | if (usb_pipeisoc(pipe)) { |
1650 | int d_status = 0; | |
1651 | struct usb_iso_packet_descriptor *d; | |
1652 | ||
1653 | d = urb->iso_frame_desc + qh->iso_idx; | |
1654 | ||
1655 | if (iso_err) { | |
1656 | d_status = -EILSEQ; | |
1657 | urb->error_count++; | |
1658 | } | |
1659 | if (rx_count > d->length) { | |
1660 | if (d_status == 0) { | |
1661 | d_status = -EOVERFLOW; | |
1662 | urb->error_count++; | |
1663 | } | |
1664 | DBG(2, "** OVERFLOW %d into %d\n", | |
1665 | rx_count, d->length); | |
1666 | ||
1667 | length = d->length; | |
1668 | } else | |
1669 | length = rx_count; | |
1670 | d->status = d_status; | |
1671 | buf = urb->transfer_dma + d->offset; | |
1672 | } else { | |
1673 | length = rx_count; | |
1674 | buf = urb->transfer_dma + | |
1675 | urb->actual_length; | |
1676 | } | |
1677 | ||
550a7375 FB |
1678 | dma->desired_mode = 0; |
1679 | #ifdef USE_MODE1 | |
1680 | /* because of the issue below, mode 1 will | |
1681 | * only rarely behave with correct semantics. | |
1682 | */ | |
1683 | if ((urb->transfer_flags & | |
1684 | URB_SHORT_NOT_OK) | |
1685 | && (urb->transfer_buffer_length - | |
1686 | urb->actual_length) | |
1687 | > qh->maxpacket) | |
1688 | dma->desired_mode = 1; | |
f82a689f AKG |
1689 | if (rx_count < hw_ep->max_packet_sz_rx) { |
1690 | length = rx_count; | |
1691 | dma->desired_mode = 0; | |
1692 | } else { | |
1693 | length = urb->transfer_buffer_length; | |
1694 | } | |
550a7375 FB |
1695 | #endif |
1696 | ||
1697 | /* Disadvantage of using mode 1: | |
1698 | * It's basically usable only for mass storage class; essentially all | |
1699 | * other protocols terminate transfers on short packets. | |
1700 | * | |
1701 | * Details: | |
1702 | * An extra IN token is sent at the end of the transfer (due to AUTOREQ) | |
1703 | * If you try to use mode 1 for (transfer_buffer_length - 512), and try | |
1704 | * to use the extra IN token to grab the last packet using mode 0, then | |
1705 | * the problem is that you cannot be sure when the device will send the | |
1706 | * last packet and RxPktRdy set. Sometimes the packet is recd too soon | |
1707 | * such that it gets lost when RxCSR is re-set at the end of the mode 1 | |
1708 | * transfer, while sometimes it is recd just a little late so that if you | |
1709 | * try to configure for mode 0 soon after the mode 1 transfer is | |
1710 | * completed, you will find rxcount 0. Okay, so you might think why not | |
1711 | * wait for an interrupt when the pkt is recd. Well, you won't get any! | |
1712 | */ | |
1713 | ||
1714 | val = musb_readw(epio, MUSB_RXCSR); | |
1715 | val &= ~MUSB_RXCSR_H_REQPKT; | |
1716 | ||
1717 | if (dma->desired_mode == 0) | |
1718 | val &= ~MUSB_RXCSR_H_AUTOREQ; | |
1719 | else | |
1720 | val |= MUSB_RXCSR_H_AUTOREQ; | |
1721 | val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; | |
1722 | ||
1723 | musb_writew(epio, MUSB_RXCSR, | |
1724 | MUSB_RXCSR_H_WZC_BITS | val); | |
1725 | ||
1726 | /* REVISIT when actual_length != 0, | |
1727 | * transfer_buffer_length needs to be | |
1728 | * adjusted first... | |
1729 | */ | |
1730 | ret = c->channel_program( | |
1731 | dma, qh->maxpacket, | |
f82a689f | 1732 | dma->desired_mode, buf, length); |
550a7375 FB |
1733 | |
1734 | if (!ret) { | |
1735 | c->channel_release(dma); | |
1736 | hw_ep->rx_channel = NULL; | |
1737 | dma = NULL; | |
1738 | /* REVISIT reset CSR */ | |
1739 | } | |
1740 | } | |
1741 | #endif /* Mentor DMA */ | |
1742 | ||
1743 | if (!dma) { | |
1744 | done = musb_host_packet_rx(musb, urb, | |
1745 | epnum, iso_err); | |
1746 | DBG(6, "read %spacket\n", done ? "last " : ""); | |
1747 | } | |
1748 | } | |
1749 | ||
550a7375 FB |
1750 | finish: |
1751 | urb->actual_length += xfer_len; | |
1752 | qh->offset += xfer_len; | |
1753 | if (done) { | |
1754 | if (urb->status == -EINPROGRESS) | |
1755 | urb->status = status; | |
1756 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); | |
1757 | } | |
1758 | } | |
1759 | ||
1760 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. | |
1761 | * the software schedule associates multiple such nodes with a given | |
1762 | * host side hardware endpoint + direction; scheduling may activate | |
1763 | * that hardware endpoint. | |
1764 | */ | |
1765 | static int musb_schedule( | |
1766 | struct musb *musb, | |
1767 | struct musb_qh *qh, | |
1768 | int is_in) | |
1769 | { | |
1770 | int idle; | |
1771 | int best_diff; | |
1772 | int best_end, epnum; | |
1773 | struct musb_hw_ep *hw_ep = NULL; | |
1774 | struct list_head *head = NULL; | |
1775 | ||
1776 | /* use fixed hardware for control and bulk */ | |
23d15e07 | 1777 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) { |
550a7375 FB |
1778 | head = &musb->control; |
1779 | hw_ep = musb->control_ep; | |
550a7375 FB |
1780 | goto success; |
1781 | } | |
1782 | ||
1783 | /* else, periodic transfers get muxed to other endpoints */ | |
1784 | ||
5d67a851 SS |
1785 | /* |
1786 | * We know this qh hasn't been scheduled, so all we need to do | |
550a7375 FB |
1787 | * is choose which hardware endpoint to put it on ... |
1788 | * | |
1789 | * REVISIT what we really want here is a regular schedule tree | |
5d67a851 | 1790 | * like e.g. OHCI uses. |
550a7375 FB |
1791 | */ |
1792 | best_diff = 4096; | |
1793 | best_end = -1; | |
1794 | ||
5d67a851 SS |
1795 | for (epnum = 1, hw_ep = musb->endpoints + 1; |
1796 | epnum < musb->nr_endpoints; | |
1797 | epnum++, hw_ep++) { | |
550a7375 FB |
1798 | int diff; |
1799 | ||
3e5c6dc7 | 1800 | if (musb_ep_get_qh(hw_ep, is_in) != NULL) |
550a7375 | 1801 | continue; |
5d67a851 | 1802 | |
550a7375 FB |
1803 | if (hw_ep == musb->bulk_ep) |
1804 | continue; | |
1805 | ||
1806 | if (is_in) | |
1807 | diff = hw_ep->max_packet_sz_rx - qh->maxpacket; | |
1808 | else | |
1809 | diff = hw_ep->max_packet_sz_tx - qh->maxpacket; | |
1810 | ||
23d15e07 | 1811 | if (diff >= 0 && best_diff > diff) { |
550a7375 FB |
1812 | best_diff = diff; |
1813 | best_end = epnum; | |
1814 | } | |
1815 | } | |
23d15e07 | 1816 | /* use bulk reserved ep1 if no other ep is free */ |
aa5cbbec | 1817 | if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { |
23d15e07 AKG |
1818 | hw_ep = musb->bulk_ep; |
1819 | if (is_in) | |
1820 | head = &musb->in_bulk; | |
1821 | else | |
1822 | head = &musb->out_bulk; | |
1e0320f0 AKG |
1823 | |
1824 | /* Enable bulk RX NAK timeout scheme when bulk requests are | |
1825 | * multiplexed. This scheme doesn't work in the high speed to | |
1826 | * full speed scenario, since NAK interrupts do not arrive | |
1827 | * from a full speed device connected behind a high speed hub. | |
1828 | * NAK timeout interval is 8 (128 uframe or 16ms) for HS and | |
1829 | * 4 (8 frame or 8ms) for FS device. | |
1830 | */ | |
1831 | if (is_in && qh->dev) | |
1832 | qh->intv_reg = | |
1833 | (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; | |
23d15e07 AKG |
1834 | goto success; |
1835 | } else if (best_end < 0) { | |
550a7375 | 1836 | return -ENOSPC; |
23d15e07 | 1837 | } |
550a7375 FB |
1838 | |
1839 | idle = 1; | |
23d15e07 | 1840 | qh->mux = 0; |
550a7375 | 1841 | hw_ep = musb->endpoints + best_end; |
550a7375 FB |
1842 | DBG(4, "qh %p periodic slot %d\n", qh, best_end); |
1843 | success: | |
23d15e07 AKG |
1844 | if (head) { |
1845 | idle = list_empty(head); | |
1846 | list_add_tail(&qh->ring, head); | |
1847 | qh->mux = 1; | |
1848 | } | |
550a7375 FB |
1849 | qh->hw_ep = hw_ep; |
1850 | qh->hep->hcpriv = qh; | |
1851 | if (idle) | |
1852 | musb_start_urb(musb, is_in, qh); | |
1853 | return 0; | |
1854 | } | |
1855 | ||
1856 | static int musb_urb_enqueue( | |
1857 | struct usb_hcd *hcd, | |
1858 | struct urb *urb, | |
1859 | gfp_t mem_flags) | |
1860 | { | |
1861 | unsigned long flags; | |
1862 | struct musb *musb = hcd_to_musb(hcd); | |
1863 | struct usb_host_endpoint *hep = urb->ep; | |
74bb3508 | 1864 | struct musb_qh *qh; |
550a7375 FB |
1865 | struct usb_endpoint_descriptor *epd = &hep->desc; |
1866 | int ret; | |
1867 | unsigned type_reg; | |
1868 | unsigned interval; | |
1869 | ||
1870 | /* host role must be active */ | |
1871 | if (!is_host_active(musb) || !musb->is_active) | |
1872 | return -ENODEV; | |
1873 | ||
1874 | spin_lock_irqsave(&musb->lock, flags); | |
1875 | ret = usb_hcd_link_urb_to_ep(hcd, urb); | |
74bb3508 DB |
1876 | qh = ret ? NULL : hep->hcpriv; |
1877 | if (qh) | |
1878 | urb->hcpriv = qh; | |
550a7375 | 1879 | spin_unlock_irqrestore(&musb->lock, flags); |
550a7375 FB |
1880 | |
1881 | /* DMA mapping was already done, if needed, and this urb is on | |
74bb3508 DB |
1882 | * hep->urb_list now ... so we're done, unless hep wasn't yet |
1883 | * scheduled onto a live qh. | |
550a7375 FB |
1884 | * |
1885 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets | |
1886 | * disabled, testing for empty qh->ring and avoiding qh setup costs | |
1887 | * except for the first urb queued after a config change. | |
1888 | */ | |
74bb3508 DB |
1889 | if (qh || ret) |
1890 | return ret; | |
550a7375 FB |
1891 | |
1892 | /* Allocate and initialize qh, minimizing the work done each time | |
1893 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. | |
1894 | * | |
1895 | * REVISIT consider a dedicated qh kmem_cache, so it's harder | |
1896 | * for bugs in other kernel code to break this driver... | |
1897 | */ | |
1898 | qh = kzalloc(sizeof *qh, mem_flags); | |
1899 | if (!qh) { | |
2492e674 | 1900 | spin_lock_irqsave(&musb->lock, flags); |
550a7375 | 1901 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
2492e674 | 1902 | spin_unlock_irqrestore(&musb->lock, flags); |
550a7375 FB |
1903 | return -ENOMEM; |
1904 | } | |
1905 | ||
1906 | qh->hep = hep; | |
1907 | qh->dev = urb->dev; | |
1908 | INIT_LIST_HEAD(&qh->ring); | |
1909 | qh->is_ready = 1; | |
1910 | ||
1911 | qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); | |
1912 | ||
1913 | /* no high bandwidth support yet */ | |
1914 | if (qh->maxpacket & ~0x7ff) { | |
1915 | ret = -EMSGSIZE; | |
1916 | goto done; | |
1917 | } | |
1918 | ||
96bcd090 JL |
1919 | qh->epnum = usb_endpoint_num(epd); |
1920 | qh->type = usb_endpoint_type(epd); | |
550a7375 FB |
1921 | |
1922 | /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ | |
1923 | qh->addr_reg = (u8) usb_pipedevice(urb->pipe); | |
1924 | ||
1925 | /* precompute rxtype/txtype/type0 register */ | |
1926 | type_reg = (qh->type << 4) | qh->epnum; | |
1927 | switch (urb->dev->speed) { | |
1928 | case USB_SPEED_LOW: | |
1929 | type_reg |= 0xc0; | |
1930 | break; | |
1931 | case USB_SPEED_FULL: | |
1932 | type_reg |= 0x80; | |
1933 | break; | |
1934 | default: | |
1935 | type_reg |= 0x40; | |
1936 | } | |
1937 | qh->type_reg = type_reg; | |
1938 | ||
136733d6 | 1939 | /* Precompute RXINTERVAL/TXINTERVAL register */ |
550a7375 FB |
1940 | switch (qh->type) { |
1941 | case USB_ENDPOINT_XFER_INT: | |
136733d6 SS |
1942 | /* |
1943 | * Full/low speeds use the linear encoding, | |
1944 | * high speed uses the logarithmic encoding. | |
1945 | */ | |
1946 | if (urb->dev->speed <= USB_SPEED_FULL) { | |
1947 | interval = max_t(u8, epd->bInterval, 1); | |
1948 | break; | |
550a7375 FB |
1949 | } |
1950 | /* FALLTHROUGH */ | |
1951 | case USB_ENDPOINT_XFER_ISOC: | |
136733d6 SS |
1952 | /* ISO always uses logarithmic encoding */ |
1953 | interval = min_t(u8, epd->bInterval, 16); | |
550a7375 FB |
1954 | break; |
1955 | default: | |
1956 | /* REVISIT we actually want to use NAK limits, hinting to the | |
1957 | * transfer scheduling logic to try some other qh, e.g. try | |
1958 | * for 2 msec first: | |
1959 | * | |
1960 | * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; | |
1961 | * | |
1962 | * The downside of disabling this is that transfer scheduling | |
1963 | * gets VERY unfair for nonperiodic transfers; a misbehaving | |
1e0320f0 AKG |
1964 | * peripheral could make that hurt. Such constant NAKing is | |
1965 | * perfectly normal for reads from network or serial adapters | |
1966 | * ... so we have partial NAKlimit support for bulk RX. | |
550a7375 | 1967 | * |
1e0320f0 | 1968 | * The upside of disabling it is simpler transfer scheduling. |
550a7375 FB |
1969 | */ |
1970 | interval = 0; | |
1971 | } | |
1972 | qh->intv_reg = interval; | |
1973 | ||
1974 | /* precompute addressing for external hub/tt ports */ | |
1975 | if (musb->is_multipoint) { | |
1976 | struct usb_device *parent = urb->dev->parent; | |
1977 | ||
1978 | if (parent != hcd->self.root_hub) { | |
1979 | qh->h_addr_reg = (u8) parent->devnum; | |
1980 | ||
1981 | /* set up tt info if needed */ | |
1982 | if (urb->dev->tt) { | |
1983 | qh->h_port_reg = (u8) urb->dev->ttport; | |
ae5ad296 AKG |
1984 | if (urb->dev->tt->hub) |
1985 | qh->h_addr_reg = | |
1986 | (u8) urb->dev->tt->hub->devnum; | |
1987 | if (urb->dev->tt->multi) | |
1988 | qh->h_addr_reg |= 0x80; | |
550a7375 FB |
1989 | } |
1990 | } | |
1991 | } | |
1992 | ||
1993 | /* invariant: hep->hcpriv is null OR the qh that's already scheduled. | |
1994 | * until we get real dma queues (with an entry for each urb/buffer), | |
1995 | * we only have work to do in the former case. | |
1996 | */ | |
1997 | spin_lock_irqsave(&musb->lock, flags); | |
1998 | if (hep->hcpriv) { | |
1999 | /* some concurrent activity submitted another urb to hep... | |
2000 | * odd, rare, error prone, but legal. | |
2001 | */ | |
2002 | kfree(qh); | |
2003 | ret = 0; | |
2004 | } else | |
2005 | ret = musb_schedule(musb, qh, | |
2006 | epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); | |
2007 | ||
2008 | if (ret == 0) { | |
2009 | urb->hcpriv = qh; | |
2010 | /* FIXME set urb->start_frame for iso/intr, it's tested in | |
2011 | * musb_start_urb(), but otherwise only konicawc cares ... | |
2012 | */ | |
2013 | } | |
2014 | spin_unlock_irqrestore(&musb->lock, flags); | |
2015 | ||
2016 | done: | |
2017 | if (ret != 0) { | |
2492e674 | 2018 | spin_lock_irqsave(&musb->lock, flags); |
550a7375 | 2019 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
2492e674 | 2020 | spin_unlock_irqrestore(&musb->lock, flags); |
550a7375 FB |
2021 | kfree(qh); |
2022 | } | |
2023 | return ret; | |
2024 | } | |
2025 | ||
2026 | ||
2027 | /* | |
2028 | * Abort a transfer that's at the head of a hardware queue. | |
2029 | * Called with the controller locked and irqs blocked. | |
2030 | * The hardware queue then advances to the next transfer, unless prevented. | |
2031 | */ | |
2032 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) | |
2033 | { | |
2034 | struct musb_hw_ep *ep = qh->hw_ep; | |
2035 | void __iomem *epio = ep->regs; | |
2036 | unsigned hw_end = ep->epnum; | |
2037 | void __iomem *regs = ep->musb->mregs; | |
2038 | u16 csr; | |
2039 | int status = 0; | |
2040 | ||
2041 | musb_ep_select(regs, hw_end); | |
2042 | ||
2043 | if (is_dma_capable()) { | |
2044 | struct dma_channel *dma; | |
2045 | ||
2046 | dma = is_in ? ep->rx_channel : ep->tx_channel; | |
2047 | if (dma) { | |
2048 | status = ep->musb->dma_controller->channel_abort(dma); | |
2049 | DBG(status ? 1 : 3, | |
2050 | "abort %cX%d DMA for urb %p --> %d\n", | |
2051 | is_in ? 'R' : 'T', ep->epnum, | |
2052 | urb, status); | |
2053 | urb->actual_length += dma->actual_len; | |
2054 | } | |
2055 | } | |
2056 | ||
2057 | /* turn off DMA requests, discard state, stop polling ... */ | |
2058 | if (is_in) { | |
2059 | /* giveback saves bulk toggle */ | |
2060 | csr = musb_h_flush_rxfifo(ep, 0); | |
2061 | ||
2062 | /* REVISIT we still get an irq; should likely clear the | |
2063 | * endpoint's irq status here to avoid bogus irqs. | |
2064 | * clearing that status is platform-specific... | |
2065 | */ | |
78322c1a | 2066 | } else if (ep->epnum) { |
550a7375 FB |
2067 | musb_h_tx_flush_fifo(ep); |
2068 | csr = musb_readw(epio, MUSB_TXCSR); | |
2069 | csr &= ~(MUSB_TXCSR_AUTOSET | |
2070 | | MUSB_TXCSR_DMAENAB | |
2071 | | MUSB_TXCSR_H_RXSTALL | |
2072 | | MUSB_TXCSR_H_NAKTIMEOUT | |
2073 | | MUSB_TXCSR_H_ERROR | |
2074 | | MUSB_TXCSR_TXPKTRDY); | |
2075 | musb_writew(epio, MUSB_TXCSR, csr); | |
2076 | /* REVISIT may need to clear FLUSHFIFO ... */ | |
2077 | musb_writew(epio, MUSB_TXCSR, csr); | |
2078 | /* flush cpu writebuffer */ | |
2079 | csr = musb_readw(epio, MUSB_TXCSR); | |
78322c1a DB |
2080 | } else { |
2081 | musb_h_ep0_flush_fifo(ep); | |
550a7375 FB |
2082 | } |
2083 | if (status == 0) | |
2084 | musb_advance_schedule(ep->musb, urb, ep, is_in); | |
2085 | return status; | |
2086 | } | |
2087 | ||
2088 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |
2089 | { | |
2090 | struct musb *musb = hcd_to_musb(hcd); | |
2091 | struct musb_qh *qh; | |
2092 | struct list_head *sched; | |
2093 | unsigned long flags; | |
2094 | int ret; | |
2095 | ||
2096 | DBG(4, "urb=%p, dev%d ep%d%s\n", urb, | |
2097 | usb_pipedevice(urb->pipe), | |
2098 | usb_pipeendpoint(urb->pipe), | |
2099 | usb_pipein(urb->pipe) ? "in" : "out"); | |
2100 | ||
2101 | spin_lock_irqsave(&musb->lock, flags); | |
2102 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | |
2103 | if (ret) | |
2104 | goto done; | |
2105 | ||
2106 | qh = urb->hcpriv; | |
2107 | if (!qh) | |
2108 | goto done; | |
2109 | ||
2110 | /* Any URB not actively programmed into endpoint hardware can be | |
a2fd814e | 2111 | * immediately given back; that's any URB not at the head of an |
550a7375 | 2112 | * endpoint queue, unless someday we get real DMA queues. And even |
a2fd814e | 2113 | * if it's at the head, it might not be known to the hardware... |
550a7375 FB |
2114 | * |
2115 | * Otherwise abort current transfer, pending dma, etc.; urb->status | |
2116 | * has already been updated. This is a synchronous abort; it'd be | |
2117 | * OK to hold off until after some IRQ, though. | |
2118 | */ | |
2119 | if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) | |
2120 | ret = -EINPROGRESS; | |
2121 | else { | |
2122 | switch (qh->type) { | |
2123 | case USB_ENDPOINT_XFER_CONTROL: | |
2124 | sched = &musb->control; | |
2125 | break; | |
2126 | case USB_ENDPOINT_XFER_BULK: | |
23d15e07 AKG |
2127 | if (qh->mux == 1) { |
2128 | if (usb_pipein(urb->pipe)) | |
2129 | sched = &musb->in_bulk; | |
2130 | else | |
2131 | sched = &musb->out_bulk; | |
2132 | break; | |
2133 | } | |
550a7375 FB |
2134 | default: |
2135 | /* REVISIT when we get a schedule tree, periodic | |
2136 | * transfers won't always be at the head of a | |
2137 | * singleton queue... | |
2138 | */ | |
2139 | sched = NULL; | |
2140 | break; | |
2141 | } | |
2142 | } | |
2143 | ||
2144 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | |
2145 | if (ret < 0 || (sched && qh != first_qh(sched))) { | |
2146 | int ready = qh->is_ready; | |
2147 | ||
2148 | ret = 0; | |
2149 | qh->is_ready = 0; | |
2150 | __musb_giveback(musb, urb, 0); | |
2151 | qh->is_ready = ready; | |
a2fd814e SS |
2152 | |
2153 | /* If nothing else (usually musb_giveback) is using it | |
2154 | * and its URB list has emptied, recycle this qh. | |
2155 | */ | |
2156 | if (ready && list_empty(&qh->hep->urb_list)) { | |
2157 | qh->hep->hcpriv = NULL; | |
2158 | list_del(&qh->ring); | |
2159 | kfree(qh); | |
2160 | } | |
550a7375 FB |
2161 | } else |
2162 | ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | |
2163 | done: | |
2164 | spin_unlock_irqrestore(&musb->lock, flags); | |
2165 | return ret; | |
2166 | } | |
2167 | ||
2168 | /* disable an endpoint */ | |
2169 | static void | |
2170 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) | |
2171 | { | |
2172 | u8 epnum = hep->desc.bEndpointAddress; | |
2173 | unsigned long flags; | |
2174 | struct musb *musb = hcd_to_musb(hcd); | |
2175 | u8 is_in = epnum & USB_DIR_IN; | |
dc61d238 SS |
2176 | struct musb_qh *qh; |
2177 | struct urb *urb; | |
550a7375 FB |
2178 | struct list_head *sched; |
2179 | ||
550a7375 FB |
2180 | spin_lock_irqsave(&musb->lock, flags); |
2181 | ||
dc61d238 SS |
2182 | qh = hep->hcpriv; |
2183 | if (qh == NULL) | |
2184 | goto exit; | |
2185 | ||
550a7375 FB |
2186 | switch (qh->type) { |
2187 | case USB_ENDPOINT_XFER_CONTROL: | |
2188 | sched = &musb->control; | |
2189 | break; | |
2190 | case USB_ENDPOINT_XFER_BULK: | |
23d15e07 AKG |
2191 | if (qh->mux == 1) { |
2192 | if (is_in) | |
2193 | sched = &musb->in_bulk; | |
2194 | else | |
2195 | sched = &musb->out_bulk; | |
2196 | break; | |
2197 | } | |
550a7375 FB |
2198 | default: |
2199 | /* REVISIT when we get a schedule tree, periodic transfers | |
2200 | * won't always be at the head of a singleton queue... | |
2201 | */ | |
2202 | sched = NULL; | |
2203 | break; | |
2204 | } | |
2205 | ||
2206 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | |
2207 | ||
2208 | /* kick first urb off the hardware, if needed */ | |
2209 | qh->is_ready = 0; | |
2210 | if (!sched || qh == first_qh(sched)) { | |
2211 | urb = next_urb(qh); | |
2212 | ||
2213 | /* make software (then hardware) stop ASAP */ | |
2214 | if (!urb->unlinked) | |
2215 | urb->status = -ESHUTDOWN; | |
2216 | ||
2217 | /* cleanup */ | |
2218 | musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | |
550a7375 | 2219 | |
dc61d238 SS |
2220 | /* Then nuke all the others ... and advance the |
2221 | * queue on hw_ep (e.g. bulk ring) when we're done. | |
2222 | */ | |
2223 | while (!list_empty(&hep->urb_list)) { | |
2224 | urb = next_urb(qh); | |
2225 | urb->status = -ESHUTDOWN; | |
2226 | musb_advance_schedule(musb, urb, qh->hw_ep, is_in); | |
2227 | } | |
2228 | } else { | |
2229 | /* Just empty the queue; the hardware is busy with | |
2230 | * other transfers, and since !qh->is_ready nothing | |
2231 | * will activate any of these as it advances. | |
2232 | */ | |
2233 | while (!list_empty(&hep->urb_list)) | |
2234 | __musb_giveback(musb, next_urb(qh), -ESHUTDOWN); | |
550a7375 | 2235 | |
dc61d238 SS |
2236 | hep->hcpriv = NULL; |
2237 | list_del(&qh->ring); | |
2238 | kfree(qh); | |
2239 | } | |
2240 | exit: | |
550a7375 FB |
2241 | spin_unlock_irqrestore(&musb->lock, flags); |
2242 | } | |
2243 | ||
2244 | static int musb_h_get_frame_number(struct usb_hcd *hcd) | |
2245 | { | |
2246 | struct musb *musb = hcd_to_musb(hcd); | |
2247 | ||
2248 | return musb_readw(musb->mregs, MUSB_FRAME); | |
2249 | } | |
2250 | ||
2251 | static int musb_h_start(struct usb_hcd *hcd) | |
2252 | { | |
2253 | struct musb *musb = hcd_to_musb(hcd); | |
2254 | ||
2255 | /* NOTE: musb_start() is called when the hub driver turns | |
2256 | * on port power, or when (OTG) peripheral starts. | |
2257 | */ | |
2258 | hcd->state = HC_STATE_RUNNING; | |
2259 | musb->port1_status = 0; | |
2260 | return 0; | |
2261 | } | |
2262 | ||
2263 | static void musb_h_stop(struct usb_hcd *hcd) | |
2264 | { | |
2265 | musb_stop(hcd_to_musb(hcd)); | |
2266 | hcd->state = HC_STATE_HALT; | |
2267 | } | |
2268 | ||
2269 | static int musb_bus_suspend(struct usb_hcd *hcd) | |
2270 | { | |
2271 | struct musb *musb = hcd_to_musb(hcd); | |
2272 | ||
2273 | if (musb->xceiv.state == OTG_STATE_A_SUSPEND) | |
2274 | return 0; | |
2275 | ||
2276 | if (is_host_active(musb) && musb->is_active) { | |
2277 | WARNING("trying to suspend as %s is_active=%i\n", | |
2278 | otg_state_string(musb), musb->is_active); | |
2279 | return -EBUSY; | |
2280 | } else | |
2281 | return 0; | |
2282 | } | |
2283 | ||
2284 | static int musb_bus_resume(struct usb_hcd *hcd) | |
2285 | { | |
2286 | /* resuming child port does the work */ | |
2287 | return 0; | |
2288 | } | |
2289 | ||
2290 | const struct hc_driver musb_hc_driver = { | |
2291 | .description = "musb-hcd", | |
2292 | .product_desc = "MUSB HDRC host driver", | |
2293 | .hcd_priv_size = sizeof(struct musb), | |
2294 | .flags = HCD_USB2 | HCD_MEMORY, | |
2295 | ||
2296 | /* not using irq handler or reset hooks from usbcore, since | |
2297 | * those must be shared with peripheral code for OTG configs | |
2298 | */ | |
2299 | ||
2300 | .start = musb_h_start, | |
2301 | .stop = musb_h_stop, | |
2302 | ||
2303 | .get_frame_number = musb_h_get_frame_number, | |
2304 | ||
2305 | .urb_enqueue = musb_urb_enqueue, | |
2306 | .urb_dequeue = musb_urb_dequeue, | |
2307 | .endpoint_disable = musb_h_disable, | |
2308 | ||
2309 | .hub_status_data = musb_hub_status_data, | |
2310 | .hub_control = musb_hub_control, | |
2311 | .bus_suspend = musb_bus_suspend, | |
2312 | .bus_resume = musb_bus_resume, | |
2313 | /* .start_port_reset = NULL, */ | |
2314 | /* .hub_irq_enable = NULL, */ | |
2315 | }; |