drivers/usb/host/ehci-hcd.c (deliverable/linux.git, at commit "USB: EHCI: Move sysfs related bits into ehci-sysfs.c")
1 /*
2 * Copyright (c) 2000-2004 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/dmapool.h>
22 #include <linux/kernel.h>
23 #include <linux/delay.h>
24 #include <linux/ioport.h>
25 #include <linux/sched.h>
26 #include <linux/vmalloc.h>
27 #include <linux/errno.h>
28 #include <linux/init.h>
29 #include <linux/timer.h>
30 #include <linux/ktime.h>
31 #include <linux/list.h>
32 #include <linux/interrupt.h>
33 #include <linux/usb.h>
34 #include <linux/usb/hcd.h>
35 #include <linux/moduleparam.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/debugfs.h>
38 #include <linux/slab.h>
39 #include <linux/uaccess.h>
40
41 #include <asm/byteorder.h>
42 #include <asm/io.h>
43 #include <asm/irq.h>
44 #include <asm/system.h>
45 #include <asm/unaligned.h>
46
47 /*-------------------------------------------------------------------------*/
48
49 /*
50 * EHCI hc_driver implementation ... experimental, incomplete.
51 * Based on the final 1.0 register interface specification.
52 *
53 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
54 * First was PCMCIA, like ISA; then CardBus, which is PCI.
55 * Next comes "CardBay", using USB 2.0 signals.
56 *
57 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
58 * Special thanks to Intel and VIA for providing host controllers to
59 * test this driver on, and Cypress (including In-System Design) for
60 * providing early devices for those host controllers to talk to!
61 */
62
63 #define DRIVER_AUTHOR "David Brownell"
64 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
65
66 static const char hcd_name [] = "ehci_hcd";
67
68
69 #undef VERBOSE_DEBUG
70 #undef EHCI_URB_TRACE
71
72 #ifdef DEBUG
73 #define EHCI_STATS
74 #endif
75
76 /* magic numbers that can affect system performance */
77 #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
78 #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
79 #define EHCI_TUNE_RL_TT 0
80 #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
81 #define EHCI_TUNE_MULT_TT 1
82 /*
83 * Some drivers think it's safe to schedule isochronous transfers more than
84 * 256 ms into the future (partly as a result of an old bug in the scheduling
85 * code). In an attempt to avoid trouble, we will use a minimum scheduling
86 * length of 512 frames instead of 256.
87 */
88 #define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
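/*
 * With HCC_PGM_FRAMELISTLEN set, the USBCMD frame list size field encodes
 * 0 = 1024, 1 = 512, 2 = 256 frames; ehci_init() below maps EHCI_TUNE_FLS
 * through that same table when sizing the periodic schedule.
 */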
89
90 #define EHCI_IAA_MSECS 10 /* arbitrary */
91 #define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
92 #define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
93 #define EHCI_SHRINK_FRAMES 5 /* async qh unlink delay */
94
95 /* Initial IRQ latency: faster than hw default */
96 static int log2_irq_thresh = 0; // 0 to 6
97 module_param (log2_irq_thresh, int, S_IRUGO);
98 MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
99
100 /* initial park setting: slower than hw default */
101 static unsigned park = 0;
102 module_param (park, uint, S_IRUGO);
103 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
104
105 /* for flakey hardware, ignore overcurrent indicators */
106 static int ignore_oc = 0;
107 module_param (ignore_oc, bool, S_IRUGO);
108 MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
109
110 /* for link power management (LPM) feature */
111 static unsigned int hird;
112 module_param(hird, int, S_IRUGO);
113 MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
114
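/*
 * All of the parameters above are read-only module parameters; they are
 * typically set at load time, for example
 *	modprobe ehci_hcd ignore_oc=1 log2_irq_thresh=3
 * or, when the driver is built in, on the kernel command line as
 *	ehci_hcd.ignore_oc=1
 */
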
115 #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
116
117 /*-------------------------------------------------------------------------*/
118
119 #include "ehci.h"
120 #include "ehci-dbg.c"
121 #include "pci-quirks.h"
122
123 /*-------------------------------------------------------------------------*/
124
125 static void
126 timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
127 {
128 /* Don't override timeouts which shrink or (later) disable
129 * the async ring; just the I/O watchdog. Note that if a
130 * SHRINK were pending, OFF would never be requested.
131 */
132 if (timer_pending(&ehci->watchdog)
133 && ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
134 & ehci->actions))
135 return;
136
137 if (!test_and_set_bit(action, &ehci->actions)) {
138 unsigned long t;
139
140 switch (action) {
141 case TIMER_IO_WATCHDOG:
142 if (!ehci->need_io_watchdog)
143 return;
144 t = EHCI_IO_JIFFIES;
145 break;
146 case TIMER_ASYNC_OFF:
147 t = EHCI_ASYNC_JIFFIES;
148 break;
149 /* case TIMER_ASYNC_SHRINK: */
150 default:
151 /* add a jiffie since we synch against the
152 * 8 KHz uframe counter.
153 */
154 t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
155 break;
156 }
157 mod_timer(&ehci->watchdog, t + jiffies);
158 }
159 }
160
161 /*-------------------------------------------------------------------------*/
162
163 /*
164 * handshake - spin reading hc until handshake completes or fails
165 * @ptr: address of hc register to be read
166 * @mask: bits to look at in result of read
167 * @done: value of those bits when handshake succeeds
168 * @usec: timeout in microseconds
169 *
170 * Returns negative errno, or zero on success
171 *
172 * Success happens when the "mask" bits have the specified value (hardware
173 * handshake done). There are two failure modes: "usec" microseconds have passed
174 * (major hardware flakeout), or the register reads as all-ones (hardware removed).
175 *
176 * That last failure should only happen in cases like physical cardbus eject
177 * before driver shutdown. But it also seems to be caused by bugs in cardbus
178 * bridge shutdown: shutting down the bridge before the devices using it.
179 */
180 static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
181 u32 mask, u32 done, int usec)
182 {
183 u32 result;
184
185 do {
186 result = ehci_readl(ehci, ptr);
187 if (result == ~(u32)0) /* card removed */
188 return -ENODEV;
189 result &= mask;
190 if (result == done)
191 return 0;
192 udelay (1);
193 usec--;
194 } while (usec > 0);
195 return -ETIMEDOUT;
196 }
197
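/*
 * For example, ehci_halt() below uses this to wait up to 16 microframes
 * (2 ms) for the controller to report the halted state:
 *
 *	handshake(ehci, &ehci->regs->status, STS_HALT, STS_HALT, 16 * 125);
 */
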
198 /* check TDI/ARC silicon is in host mode */
199 static int tdi_in_host_mode (struct ehci_hcd *ehci)
200 {
201 u32 __iomem *reg_ptr;
202 u32 tmp;
203
204 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
205 tmp = ehci_readl(ehci, reg_ptr);
206 return (tmp & 3) == USBMODE_CM_HC;
207 }
208
209 /* force HC to halt state from unknown (EHCI spec section 2.3) */
210 static int ehci_halt (struct ehci_hcd *ehci)
211 {
212 u32 temp = ehci_readl(ehci, &ehci->regs->status);
213
214 /* disable any irqs left enabled by previous code */
215 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
216
217 if (ehci_is_TDI(ehci) && tdi_in_host_mode(ehci) == 0) {
218 return 0;
219 }
220
221 if ((temp & STS_HALT) != 0)
222 return 0;
223
224 temp = ehci_readl(ehci, &ehci->regs->command);
225 temp &= ~CMD_RUN;
226 ehci_writel(ehci, temp, &ehci->regs->command);
227 return handshake (ehci, &ehci->regs->status,
228 STS_HALT, STS_HALT, 16 * 125);
229 }
230
231 static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
232 u32 mask, u32 done, int usec)
233 {
234 int error;
235
236 error = handshake(ehci, ptr, mask, done, usec);
237 if (error) {
238 ehci_halt(ehci);
239 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
240 ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
241 ptr, mask, done, error);
242 }
243
244 return error;
245 }
246
247 /* put TDI/ARC silicon into EHCI mode */
248 static void tdi_reset (struct ehci_hcd *ehci)
249 {
250 u32 __iomem *reg_ptr;
251 u32 tmp;
252
253 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
254 tmp = ehci_readl(ehci, reg_ptr);
255 tmp |= USBMODE_CM_HC;
256 /* The default byte access to MMR space is LE after
257 * controller reset. Set the required endian mode
258 * for transfer buffers to match the host microprocessor
259 */
260 if (ehci_big_endian_mmio(ehci))
261 tmp |= USBMODE_BE;
262 ehci_writel(ehci, tmp, reg_ptr);
263 }
264
265 /* reset a non-running (STS_HALT == 1) controller */
266 static int ehci_reset (struct ehci_hcd *ehci)
267 {
268 int retval;
269 u32 command = ehci_readl(ehci, &ehci->regs->command);
270
271 /* If the EHCI debug controller is active, special care must be
272 * taken before and after a host controller reset */
273 if (ehci->debug && !dbgp_reset_prep())
274 ehci->debug = NULL;
275
276 command |= CMD_RESET;
277 dbg_cmd (ehci, "reset", command);
278 ehci_writel(ehci, command, &ehci->regs->command);
279 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
280 ehci->next_statechange = jiffies;
281 retval = handshake (ehci, &ehci->regs->command,
282 CMD_RESET, 0, 250 * 1000);
283
284 if (ehci->has_hostpc) {
285 ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
286 (u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX));
287 ehci_writel(ehci, TXFIFO_DEFAULT,
288 (u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING));
289 }
290 if (retval)
291 return retval;
292
293 if (ehci_is_TDI(ehci))
294 tdi_reset (ehci);
295
296 if (ehci->debug)
297 dbgp_external_startup();
298
299 return retval;
300 }
301
302 /* idle the controller (from running) */
303 static void ehci_quiesce (struct ehci_hcd *ehci)
304 {
305 u32 temp;
306
307 #ifdef DEBUG
308 if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
309 BUG ();
310 #endif
311
312 /* wait for any schedule enables/disables to take effect */
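	/* Shifting USBCMD left by 10 bits lines up CMD_PSE/CMD_ASE (bits 4-5)
	 * with STS_PSS/STS_ASS (bits 14-15), so the handshake below waits
	 * until the status bits match the schedule enables just requested.
	 */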
313 temp = ehci_readl(ehci, &ehci->regs->command) << 10;
314 temp &= STS_ASS | STS_PSS;
315 if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
316 STS_ASS | STS_PSS, temp, 16 * 125))
317 return;
318
319 /* then disable anything that's still active */
320 temp = ehci_readl(ehci, &ehci->regs->command);
321 temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
322 ehci_writel(ehci, temp, &ehci->regs->command);
323
324 /* hardware can take 16 microframes to turn off ... */
325 handshake_on_error_set_halt(ehci, &ehci->regs->status,
326 STS_ASS | STS_PSS, 0, 16 * 125);
327 }
328
329 /*-------------------------------------------------------------------------*/
330
331 static void end_unlink_async(struct ehci_hcd *ehci);
332 static void ehci_work(struct ehci_hcd *ehci);
333
334 #include "ehci-hub.c"
335 #include "ehci-lpm.c"
336 #include "ehci-mem.c"
337 #include "ehci-q.c"
338 #include "ehci-sched.c"
339 #include "ehci-sysfs.c"
340
341 /*-------------------------------------------------------------------------*/
342
343 static void ehci_iaa_watchdog(unsigned long param)
344 {
345 struct ehci_hcd *ehci = (struct ehci_hcd *) param;
346 unsigned long flags;
347
348 spin_lock_irqsave (&ehci->lock, flags);
349
350 /* Lost IAA irqs wedge things badly; seen first with a vt8235.
351 * So we need this watchdog, but must protect it against both
352 * (a) SMP races against real IAA firing and retriggering, and
353 * (b) clean HC shutdown, when IAA watchdog was pending.
354 */
355 if (ehci->reclaim
356 && !timer_pending(&ehci->iaa_watchdog)
357 && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
358 u32 cmd, status;
359
360 /* If we get here, IAA is *REALLY* late. It's barely
361 * conceivable that the system is so busy that CMD_IAAD
362 * is still legitimately set, so let's be sure it's
363 * clear before we read STS_IAA. (The HC should clear
364 * CMD_IAAD when it sets STS_IAA.)
365 */
366 cmd = ehci_readl(ehci, &ehci->regs->command);
367 if (cmd & CMD_IAAD)
368 ehci_writel(ehci, cmd & ~CMD_IAAD,
369 &ehci->regs->command);
370
371 /* If IAA is set here it either legitimately triggered
372 * before we cleared IAAD above (but _way_ late, so we'll
373 * still count it as lost) ... or a silicon erratum:
374 * - VIA seems to set IAA without triggering the IRQ;
375 * - IAAD potentially cleared without setting IAA.
376 */
377 status = ehci_readl(ehci, &ehci->regs->status);
378 if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
379 COUNT (ehci->stats.lost_iaa);
380 ehci_writel(ehci, STS_IAA, &ehci->regs->status);
381 }
382
383 ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
384 status, cmd);
385 end_unlink_async(ehci);
386 }
387
388 spin_unlock_irqrestore(&ehci->lock, flags);
389 }
390
391 static void ehci_watchdog(unsigned long param)
392 {
393 struct ehci_hcd *ehci = (struct ehci_hcd *) param;
394 unsigned long flags;
395
396 spin_lock_irqsave(&ehci->lock, flags);
397
398 /* stop async processing after it's idled a bit */
399 if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
400 start_unlink_async (ehci, ehci->async);
401
402 /* ehci could run by timer, without IRQs ... */
403 ehci_work (ehci);
404
405 spin_unlock_irqrestore (&ehci->lock, flags);
406 }
407
408 /* On some systems, leaving remote wakeup enabled prevents system shutdown.
409 * The firmware seems to think that powering off is a wakeup event!
410 * This routine turns off remote wakeup and everything else, on all ports.
411 */
412 static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
413 {
414 int port = HCS_N_PORTS(ehci->hcs_params);
415
416 while (port--)
417 ehci_writel(ehci, PORT_RWC_BITS,
418 &ehci->regs->port_status[port]);
419 }
420
421 /*
422 * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
423 * Should be called with ehci->lock held.
424 */
425 static void ehci_silence_controller(struct ehci_hcd *ehci)
426 {
427 ehci_halt(ehci);
428 ehci_turn_off_all_ports(ehci);
429
430 /* make BIOS/etc use companion controller during reboot */
431 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
432
433 /* unblock posted writes */
434 ehci_readl(ehci, &ehci->regs->configured_flag);
435 }
436
437 /* ehci_shutdown kicks in for silicon on any bus (not just pci, etc).
438 * This forcibly disables dma and IRQs, helping kexec and other cases
439 * where the next system software may expect clean state.
440 */
441 static void ehci_shutdown(struct usb_hcd *hcd)
442 {
443 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
444
445 del_timer_sync(&ehci->watchdog);
446 del_timer_sync(&ehci->iaa_watchdog);
447
448 spin_lock_irq(&ehci->lock);
449 ehci_silence_controller(ehci);
450 spin_unlock_irq(&ehci->lock);
451 }
452
453 static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
454 {
455 unsigned port;
456
457 if (!HCS_PPC (ehci->hcs_params))
458 return;
459
460 ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
461 for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
462 (void) ehci_hub_control(ehci_to_hcd(ehci),
463 is_on ? SetPortFeature : ClearPortFeature,
464 USB_PORT_FEAT_POWER,
465 port--, NULL, 0);
466 /* Flush those writes */
467 ehci_readl(ehci, &ehci->regs->command);
468 msleep(20);
469 }
470
471 /*-------------------------------------------------------------------------*/
472
473 /*
474 * ehci_work is called from some interrupts, timers, and so on.
475 * it calls driver completion functions, after dropping ehci->lock.
476 */
477 static void ehci_work (struct ehci_hcd *ehci)
478 {
479 timer_action_done (ehci, TIMER_IO_WATCHDOG);
480
481 /* another CPU may drop ehci->lock during a schedule scan while
482 * it reports urb completions. this flag guards against bogus
483 * attempts at re-entrant schedule scanning.
484 */
485 if (ehci->scanning)
486 return;
487 ehci->scanning = 1;
488 scan_async (ehci);
489 if (ehci->next_uframe != -1)
490 scan_periodic (ehci);
491 ehci->scanning = 0;
492
493 /* the IO watchdog guards against hardware or driver bugs that
494 * misplace IRQs, and should let us run completely without IRQs.
495 * such lossage has been observed on both VT6202 and VT8235.
496 */
497 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
498 (ehci->async->qh_next.ptr != NULL ||
499 ehci->periodic_sched != 0))
500 timer_action (ehci, TIMER_IO_WATCHDOG);
501 }
502
503 /*
504 * Called when the ehci_hcd module is removed.
505 */
506 static void ehci_stop (struct usb_hcd *hcd)
507 {
508 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
509
510 ehci_dbg (ehci, "stop\n");
511
512 /* no more interrupts ... */
513 del_timer_sync (&ehci->watchdog);
514 del_timer_sync(&ehci->iaa_watchdog);
515
516 spin_lock_irq(&ehci->lock);
517 if (HC_IS_RUNNING (hcd->state))
518 ehci_quiesce (ehci);
519
520 ehci_silence_controller(ehci);
521 ehci_reset (ehci);
522 spin_unlock_irq(&ehci->lock);
523
524 remove_sysfs_files(ehci);
525 remove_debug_files (ehci);
526
527 /* root hub is shut down separately (first, when possible) */
528 spin_lock_irq (&ehci->lock);
529 if (ehci->async)
530 ehci_work (ehci);
531 spin_unlock_irq (&ehci->lock);
532 ehci_mem_cleanup (ehci);
533
534 if (ehci->amd_pll_fix == 1)
535 usb_amd_dev_put();
536
537 #ifdef EHCI_STATS
538 ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
539 ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
540 ehci->stats.lost_iaa);
541 ehci_dbg (ehci, "complete %ld unlink %ld\n",
542 ehci->stats.complete, ehci->stats.unlink);
543 #endif
544
545 dbg_status (ehci, "ehci_stop completed",
546 ehci_readl(ehci, &ehci->regs->status));
547 }
548
549 /* one-time init, only for memory state */
550 static int ehci_init(struct usb_hcd *hcd)
551 {
552 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
553 u32 temp;
554 int retval;
555 u32 hcc_params;
556 struct ehci_qh_hw *hw;
557
558 spin_lock_init(&ehci->lock);
559
560 /*
561 * keep the io watchdog by default; well-behaved HCDs can turn it off later
562 */
563 ehci->need_io_watchdog = 1;
564 init_timer(&ehci->watchdog);
565 ehci->watchdog.function = ehci_watchdog;
566 ehci->watchdog.data = (unsigned long) ehci;
567
568 init_timer(&ehci->iaa_watchdog);
569 ehci->iaa_watchdog.function = ehci_iaa_watchdog;
570 ehci->iaa_watchdog.data = (unsigned long) ehci;
571
572 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
573
574 /*
575 * hw default: 1K periodic list heads, one per frame.
576 * periodic_size can shrink by USBCMD update if hcc_params allows.
577 */
578 ehci->periodic_size = DEFAULT_I_TDPS;
579 INIT_LIST_HEAD(&ehci->cached_itd_list);
580 INIT_LIST_HEAD(&ehci->cached_sitd_list);
581
582 if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
583 /* periodic schedule size can be smaller than default */
584 switch (EHCI_TUNE_FLS) {
585 case 0: ehci->periodic_size = 1024; break;
586 case 1: ehci->periodic_size = 512; break;
587 case 2: ehci->periodic_size = 256; break;
588 default: BUG();
589 }
590 }
591 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
592 return retval;
593
594 /* controllers may cache some of the periodic schedule ... */
595 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
596 ehci->i_thresh = 2 + 8;
597 else // N microframes cached
598 ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
599
600 ehci->reclaim = NULL;
601 ehci->next_uframe = -1;
602 ehci->clock_frame = -1;
603
604 /*
605 * dedicate a qh for the async ring head, since we couldn't unlink
606 * a 'real' qh without stopping the async schedule [4.8]. use it
607 * as the 'reclamation list head' too.
608 * its dummy is used in hw_alt_next of many tds, to prevent the qh
609 * from automatically advancing to the next td after short reads.
610 */
611 ehci->async->qh_next.qh = NULL;
612 hw = ehci->async->hw;
613 hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
614 hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
615 hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
616 hw->hw_qtd_next = EHCI_LIST_END(ehci);
617 ehci->async->qh_state = QH_STATE_LINKED;
618 hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
619
620 /* clear interrupt enables, set irq latency */
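	/* USBCMD bits 23:16 are the Interrupt Threshold Control field,
	 * counted in microframes; writing 1 << log2_irq_thresh there picks
	 * a latency of 1, 2, 4, ... up to 64 microframes.
	 */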
621 if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
622 log2_irq_thresh = 0;
623 temp = 1 << (16 + log2_irq_thresh);
624 if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
625 ehci->has_ppcd = 1;
626 ehci_dbg(ehci, "enable per-port change event\n");
627 temp |= CMD_PPCEE;
628 }
629 if (HCC_CANPARK(hcc_params)) {
630 /* HW default park == 3, on hardware that supports it (like
631 * NVidia and ALI silicon), maximizes throughput on the async
632 * schedule by avoiding QH fetches between transfers.
633 *
634 * With fast usb storage devices and NForce2, "park" seems to
635 * make problems: throughput reduction (!), data errors...
636 */
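		/* CMD_PARK enables async park mode; the count written to
		 * USBCMD bits 9:8 (park << 8) limits how many back-to-back
		 * packets the HC may run from one high speed queue head.
		 */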
637 if (park) {
638 park = min(park, (unsigned) 3);
639 temp |= CMD_PARK;
640 temp |= park << 8;
641 }
642 ehci_dbg(ehci, "park %d\n", park);
643 }
644 if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
645 /* periodic schedule size can be smaller than default */
646 temp &= ~(3 << 2);
647 temp |= (EHCI_TUNE_FLS << 2);
648 }
649 if (HCC_LPM(hcc_params)) {
650 /* support link power management EHCI 1.1 addendum */
651 ehci_dbg(ehci, "support lpm\n");
652 ehci->has_lpm = 1;
653 if (hird > 0xf) {
654 ehci_dbg(ehci, "hird %d invalid, use default 0",
655 hird);
656 hird = 0;
657 }
658 temp |= hird << 24;
659 }
660 ehci->command = temp;
661
662 /* Accept arbitrarily long scatter-gather lists */
663 if (!(hcd->driver->flags & HCD_LOCAL_MEM))
664 hcd->self.sg_tablesize = ~0;
665 return 0;
666 }
667
668 /* start HC running; it's halted, ehci_init() has been run (once) */
669 static int ehci_run (struct usb_hcd *hcd)
670 {
671 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
672 int retval;
673 u32 temp;
674 u32 hcc_params;
675
676 hcd->uses_new_polling = 1;
677
678 /* EHCI spec section 4.1 */
679 /*
680 * TDI driver does the ehci_reset in their reset callback.
681 * Don't reset here, because configuration settings will
682 * vanish.
683 */
684 if (!ehci_is_TDI(ehci) && (retval = ehci_reset(ehci)) != 0) {
685 ehci_mem_cleanup(ehci);
686 return retval;
687 }
688 ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
689 ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
690
691 /*
692 * hcc_params controls whether ehci->regs->segment must (!!!)
693 * be used; it constrains QH/ITD/SITD and QTD locations.
694 * pci_pool consistent memory always uses segment zero.
695 * streaming mappings for I/O buffers, like pci_map_single(),
696 * can return segments above 4GB, if the device allows.
697 *
698 * NOTE: the dma mask is visible through dma_supported(), so
699 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
700 * Scsi_Host.highmem_io, and so forth. It's readonly to all
701 * host side drivers though.
702 */
703 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
704 if (HCC_64BIT_ADDR(hcc_params)) {
705 ehci_writel(ehci, 0, &ehci->regs->segment);
706 #if 0
707 // this is deeply broken on almost all architectures
708 if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
709 ehci_info(ehci, "enabled 64bit DMA\n");
710 #endif
711 }
712
713
714 // Philips, Intel, and maybe others need CMD_RUN before the
715 // root hub will detect new devices (why?); NEC doesn't
716 ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
717 ehci->command |= CMD_RUN;
718 ehci_writel(ehci, ehci->command, &ehci->regs->command);
719 dbg_cmd (ehci, "init", ehci->command);
720
721 /*
722 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
723 * are explicitly handed to companion controller(s), so no TT is
724 * involved with the root hub. (Except where one is integrated,
725 * and there's no companion controller unless maybe for USB OTG.)
726 *
727 * Turning on the CF flag will transfer ownership of all ports
728 * from the companions to the EHCI controller. If any of the
729 * companions are in the middle of a port reset at the time, it
730 * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
731 * guarantees that no resets are in progress. After we set CF,
732 * a short delay lets the hardware catch up; new resets shouldn't
733 * be started before the port switching actions could complete.
734 */
735 down_write(&ehci_cf_port_reset_rwsem);
736 hcd->state = HC_STATE_RUNNING;
737 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
738 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
739 msleep(5);
740 up_write(&ehci_cf_port_reset_rwsem);
741 ehci->last_periodic_enable = ktime_get_real();
742
743 temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
744 ehci_info (ehci,
745 "USB %x.%x started, EHCI %x.%02x%s\n",
746 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
747 temp >> 8, temp & 0xff,
748 ignore_oc ? ", overcurrent ignored" : "");
749
750 ehci_writel(ehci, INTR_MASK,
751 &ehci->regs->intr_enable); /* Turn On Interrupts */
752
753 /* GRR this is run-once init(), being done every time the HC starts.
754 * So long as they're part of class devices, we can't do it in init()
755 * since the class device isn't created that early.
756 */
757 create_debug_files(ehci);
758 create_sysfs_files(ehci);
759
760 return 0;
761 }
762
763 /*-------------------------------------------------------------------------*/
764
765 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
766 {
767 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
768 u32 status, masked_status, pcd_status = 0, cmd;
769 int bh;
770
771 spin_lock (&ehci->lock);
772
773 status = ehci_readl(ehci, &ehci->regs->status);
774
775 /* e.g. cardbus physical eject */
776 if (status == ~(u32) 0) {
777 ehci_dbg (ehci, "device removed\n");
778 goto dead;
779 }
780
781 /* Shared IRQ? */
782 masked_status = status & INTR_MASK;
783 if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) {
784 spin_unlock(&ehci->lock);
785 return IRQ_NONE;
786 }
787
788 /* clear (just) interrupts */
789 ehci_writel(ehci, masked_status, &ehci->regs->status);
790 cmd = ehci_readl(ehci, &ehci->regs->command);
791 bh = 0;
792
793 #ifdef VERBOSE_DEBUG
794 /* unrequested/ignored: Frame List Rollover */
795 dbg_status (ehci, "irq", status);
796 #endif
797
798 /* INT, ERR, and IAA interrupt rates can be throttled */
799
800 /* normal [4.15.1.2] or error [4.15.1.1] completion */
801 if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
802 if (likely ((status & STS_ERR) == 0))
803 COUNT (ehci->stats.normal);
804 else
805 COUNT (ehci->stats.error);
806 bh = 1;
807 }
808
809 /* complete the unlinking of some qh [4.15.2.3] */
810 if (status & STS_IAA) {
811 /* guard against (alleged) silicon errata */
812 if (cmd & CMD_IAAD) {
813 ehci_writel(ehci, cmd & ~CMD_IAAD,
814 &ehci->regs->command);
815 ehci_dbg(ehci, "IAA with IAAD still set?\n");
816 }
817 if (ehci->reclaim) {
818 COUNT(ehci->stats.reclaim);
819 end_unlink_async(ehci);
820 } else
821 ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
822 }
823
824 /* remote wakeup [4.3.1] */
825 if (status & STS_PCD) {
826 unsigned i = HCS_N_PORTS (ehci->hcs_params);
827 u32 ppcd = 0;
828
829 /* kick root hub later */
830 pcd_status = status;
831
832 /* resume root hub? */
833 if (!(cmd & CMD_RUN))
834 usb_hcd_resume_root_hub(hcd);
835
836 /* get per-port change detect bits */
837 if (ehci->has_ppcd)
838 ppcd = status >> 16;
839
840 while (i--) {
841 int pstatus;
842
843 /* leverage per-port change bits feature */
844 if (ehci->has_ppcd && !(ppcd & (1 << i)))
845 continue;
846 pstatus = ehci_readl(ehci,
847 &ehci->regs->port_status[i]);
848
849 if (pstatus & PORT_OWNER)
850 continue;
851 if (!(test_bit(i, &ehci->suspended_ports) &&
852 ((pstatus & PORT_RESUME) ||
853 !(pstatus & PORT_SUSPEND)) &&
854 (pstatus & PORT_PE) &&
855 ehci->reset_done[i] == 0))
856 continue;
857
858 /* start 20 msec resume signaling from this port,
859 * and make khubd collect PORT_STAT_C_SUSPEND to
860 * stop that signaling. Use 5 ms extra for safety,
861 * like usb_port_resume() does.
862 */
863 ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
864 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
865 mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
866 }
867 }
868
869 /* PCI errors [4.15.2.4] */
870 if (unlikely ((status & STS_FATAL) != 0)) {
871 ehci_err(ehci, "fatal error\n");
872 dbg_cmd(ehci, "fatal", cmd);
873 dbg_status(ehci, "fatal", status);
874 ehci_halt(ehci);
875 dead:
876 ehci_reset(ehci);
877 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
878 usb_hc_died(hcd);
879 /* generic layer kills/unlinks all urbs, then
880 * uses ehci_stop to clean up the rest
881 */
882 bh = 1;
883 }
884
885 if (bh)
886 ehci_work (ehci);
887 spin_unlock (&ehci->lock);
888 if (pcd_status)
889 usb_hcd_poll_rh_status(hcd);
890 return IRQ_HANDLED;
891 }
892
893 /*-------------------------------------------------------------------------*/
894
895 /*
896 * non-error returns are a promise to giveback() the urb later
897 * we drop ownership so next owner (or urb unlink) can get it
898 *
899 * urb + dev is in hcd.self.controller.urb_list
900 * we're queueing TDs onto software and hardware lists
901 *
902 * hcd-specific init for hcpriv hasn't been done yet
903 *
904 * NOTE: control, bulk, and interrupt share the same code to append TDs
905 * to a (possibly active) QH, and the same QH scanning code.
906 */
907 static int ehci_urb_enqueue (
908 struct usb_hcd *hcd,
909 struct urb *urb,
910 gfp_t mem_flags
911 ) {
912 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
913 struct list_head qtd_list;
914
915 INIT_LIST_HEAD (&qtd_list);
916
917 switch (usb_pipetype (urb->pipe)) {
918 case PIPE_CONTROL:
919 /* qh_completions() code doesn't handle all the fault cases
920 * in multi-TD control transfers. Even 1KB is rare anyway.
921 */
922 if (urb->transfer_buffer_length > (16 * 1024))
923 return -EMSGSIZE;
924 /* FALLTHROUGH */
925 /* case PIPE_BULK: */
926 default:
927 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
928 return -ENOMEM;
929 return submit_async(ehci, urb, &qtd_list, mem_flags);
930
931 case PIPE_INTERRUPT:
932 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
933 return -ENOMEM;
934 return intr_submit(ehci, urb, &qtd_list, mem_flags);
935
936 case PIPE_ISOCHRONOUS:
937 if (urb->dev->speed == USB_SPEED_HIGH)
938 return itd_submit (ehci, urb, mem_flags);
939 else
940 return sitd_submit (ehci, urb, mem_flags);
941 }
942 }
943
944 static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
945 {
946 /* failfast */
947 if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
948 end_unlink_async(ehci);
949
950 /* If the QH isn't linked then there's nothing we can do
951 * unless we were called during a giveback, in which case
952 * qh_completions() has to deal with it.
953 */
954 if (qh->qh_state != QH_STATE_LINKED) {
955 if (qh->qh_state == QH_STATE_COMPLETING)
956 qh->needs_rescan = 1;
957 return;
958 }
959
960 /* defer till later if busy */
961 if (ehci->reclaim) {
962 struct ehci_qh *last;
963
964 for (last = ehci->reclaim;
965 last->reclaim;
966 last = last->reclaim)
967 continue;
968 qh->qh_state = QH_STATE_UNLINK_WAIT;
969 last->reclaim = qh;
970
971 /* start IAA cycle */
972 } else
973 start_unlink_async (ehci, qh);
974 }
975
976 /* remove from hardware lists
977 * completions normally happen asynchronously
978 */
979
980 static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
981 {
982 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
983 struct ehci_qh *qh;
984 unsigned long flags;
985 int rc;
986
987 spin_lock_irqsave (&ehci->lock, flags);
988 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
989 if (rc)
990 goto done;
991
992 switch (usb_pipetype (urb->pipe)) {
993 // case PIPE_CONTROL:
994 // case PIPE_BULK:
995 default:
996 qh = (struct ehci_qh *) urb->hcpriv;
997 if (!qh)
998 break;
999 switch (qh->qh_state) {
1000 case QH_STATE_LINKED:
1001 case QH_STATE_COMPLETING:
1002 unlink_async(ehci, qh);
1003 break;
1004 case QH_STATE_UNLINK:
1005 case QH_STATE_UNLINK_WAIT:
1006 /* already started */
1007 break;
1008 case QH_STATE_IDLE:
1009 /* QH might be waiting for a Clear-TT-Buffer */
1010 qh_completions(ehci, qh);
1011 break;
1012 }
1013 break;
1014
1015 case PIPE_INTERRUPT:
1016 qh = (struct ehci_qh *) urb->hcpriv;
1017 if (!qh)
1018 break;
1019 switch (qh->qh_state) {
1020 case QH_STATE_LINKED:
1021 case QH_STATE_COMPLETING:
1022 intr_deschedule (ehci, qh);
1023 break;
1024 case QH_STATE_IDLE:
1025 qh_completions (ehci, qh);
1026 break;
1027 default:
1028 ehci_dbg (ehci, "bogus qh %p state %d\n",
1029 qh, qh->qh_state);
1030 goto done;
1031 }
1032 break;
1033
1034 case PIPE_ISOCHRONOUS:
1035 // itd or sitd ...
1036
1037 // wait till next completion, do it then.
1038 // completion irqs can wait up to 1024 msec,
1039 break;
1040 }
1041 done:
1042 spin_unlock_irqrestore (&ehci->lock, flags);
1043 return rc;
1044 }
1045
1046 /*-------------------------------------------------------------------------*/
1047
1048 // bulk qh holds the data toggle
1049
1050 static void
1051 ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1052 {
1053 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
1054 unsigned long flags;
1055 struct ehci_qh *qh, *tmp;
1056
1057 /* ASSERT: any requests/urbs are being unlinked */
1058 /* ASSERT: nobody can be submitting urbs for this any more */
1059
1060 rescan:
1061 spin_lock_irqsave (&ehci->lock, flags);
1062 qh = ep->hcpriv;
1063 if (!qh)
1064 goto done;
1065
1066 /* endpoints can be iso streams. for now, we don't
1067 * accelerate iso completions ... so spin a while.
1068 */
1069 if (qh->hw == NULL) {
1070 ehci_vdbg (ehci, "iso delay\n");
1071 goto idle_timeout;
1072 }
1073
1074 if (!HC_IS_RUNNING (hcd->state))
1075 qh->qh_state = QH_STATE_IDLE;
1076 switch (qh->qh_state) {
1077 case QH_STATE_LINKED:
1078 case QH_STATE_COMPLETING:
1079 for (tmp = ehci->async->qh_next.qh;
1080 tmp && tmp != qh;
1081 tmp = tmp->qh_next.qh)
1082 continue;
1083 /* periodic qh self-unlinks on empty, and a COMPLETING qh
1084 * may already be unlinked.
1085 */
1086 if (tmp)
1087 unlink_async(ehci, qh);
1088 /* FALL THROUGH */
1089 case QH_STATE_UNLINK: /* wait for hw to finish? */
1090 case QH_STATE_UNLINK_WAIT:
1091 idle_timeout:
1092 spin_unlock_irqrestore (&ehci->lock, flags);
1093 schedule_timeout_uninterruptible(1);
1094 goto rescan;
1095 case QH_STATE_IDLE: /* fully unlinked */
1096 if (qh->clearing_tt)
1097 goto idle_timeout;
1098 if (list_empty (&qh->qtd_list)) {
1099 qh_put (qh);
1100 break;
1101 }
1102 /* else FALL THROUGH */
1103 default:
1104 /* caller was supposed to have unlinked any requests;
1105 * that's not our job. just leak this memory.
1106 */
1107 ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
1108 qh, ep->desc.bEndpointAddress, qh->qh_state,
1109 list_empty (&qh->qtd_list) ? "" : "(has tds)");
1110 break;
1111 }
1112 ep->hcpriv = NULL;
1113 done:
1114 spin_unlock_irqrestore (&ehci->lock, flags);
1115 }
1116
1117 static void
1118 ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1119 {
1120 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
1121 struct ehci_qh *qh;
1122 int eptype = usb_endpoint_type(&ep->desc);
1123 int epnum = usb_endpoint_num(&ep->desc);
1124 int is_out = usb_endpoint_dir_out(&ep->desc);
1125 unsigned long flags;
1126
1127 if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
1128 return;
1129
1130 spin_lock_irqsave(&ehci->lock, flags);
1131 qh = ep->hcpriv;
1132
1133 /* For Bulk and Interrupt endpoints we maintain the toggle state
1134 * in the hardware; the toggle bits in udev aren't used at all.
1135 * When an endpoint is reset by usb_clear_halt() we must reset
1136 * the toggle bit in the QH.
1137 */
1138 if (qh) {
1139 usb_settoggle(qh->dev, epnum, is_out, 0);
1140 if (!list_empty(&qh->qtd_list)) {
1141 WARN_ONCE(1, "clear_halt for a busy endpoint\n");
1142 } else if (qh->qh_state == QH_STATE_LINKED ||
1143 qh->qh_state == QH_STATE_COMPLETING) {
1144
1145 /* The toggle value in the QH can't be updated
1146 * while the QH is active. Unlink it now;
1147 * re-linking will call qh_refresh().
1148 */
1149 if (eptype == USB_ENDPOINT_XFER_BULK)
1150 unlink_async(ehci, qh);
1151 else
1152 intr_deschedule(ehci, qh);
1153 }
1154 }
1155 spin_unlock_irqrestore(&ehci->lock, flags);
1156 }
1157
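/* The FRINDEX register counts microframes: bits 2:0 hold the microframe
 * number within the current frame, so shifting right by 3 gives the frame
 * number, reduced modulo the periodic schedule length.
 */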
1158 static int ehci_get_frame (struct usb_hcd *hcd)
1159 {
1160 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
1161 return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
1162 ehci->periodic_size;
1163 }
1164
1165 /*-------------------------------------------------------------------------*/
1166
1167 MODULE_DESCRIPTION(DRIVER_DESC);
1168 MODULE_AUTHOR (DRIVER_AUTHOR);
1169 MODULE_LICENSE ("GPL");
1170
1171 #ifdef CONFIG_PCI
1172 #include "ehci-pci.c"
1173 #define PCI_DRIVER ehci_pci_driver
1174 #endif
1175
1176 #ifdef CONFIG_USB_EHCI_FSL
1177 #include "ehci-fsl.c"
1178 #define PLATFORM_DRIVER ehci_fsl_driver
1179 #endif
1180
1181 #ifdef CONFIG_USB_EHCI_MXC
1182 #include "ehci-mxc.c"
1183 #define PLATFORM_DRIVER ehci_mxc_driver
1184 #endif
1185
1186 #ifdef CONFIG_USB_EHCI_SH
1187 #include "ehci-sh.c"
1188 #define PLATFORM_DRIVER ehci_hcd_sh_driver
1189 #endif
1190
1191 #ifdef CONFIG_SOC_AU1200
1192 #include "ehci-au1xxx.c"
1193 #define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
1194 #endif
1195
1196 #ifdef CONFIG_USB_EHCI_HCD_OMAP
1197 #include "ehci-omap.c"
1198 #define PLATFORM_DRIVER ehci_hcd_omap_driver
1199 #endif
1200
1201 #ifdef CONFIG_PPC_PS3
1202 #include "ehci-ps3.c"
1203 #define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
1204 #endif
1205
1206 #ifdef CONFIG_USB_EHCI_HCD_PPC_OF
1207 #include "ehci-ppc-of.c"
1208 #define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
1209 #endif
1210
1211 #ifdef CONFIG_XPS_USB_HCD_XILINX
1212 #include "ehci-xilinx-of.c"
1213 #define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
1214 #endif
1215
1216 #ifdef CONFIG_PLAT_ORION
1217 #include "ehci-orion.c"
1218 #define PLATFORM_DRIVER ehci_orion_driver
1219 #endif
1220
1221 #ifdef CONFIG_ARCH_IXP4XX
1222 #include "ehci-ixp4xx.c"
1223 #define PLATFORM_DRIVER ixp4xx_ehci_driver
1224 #endif
1225
1226 #ifdef CONFIG_USB_W90X900_EHCI
1227 #include "ehci-w90x900.c"
1228 #define PLATFORM_DRIVER ehci_hcd_w90x900_driver
1229 #endif
1230
1231 #ifdef CONFIG_ARCH_AT91
1232 #include "ehci-atmel.c"
1233 #define PLATFORM_DRIVER ehci_atmel_driver
1234 #endif
1235
1236 #ifdef CONFIG_USB_OCTEON_EHCI
1237 #include "ehci-octeon.c"
1238 #define PLATFORM_DRIVER ehci_octeon_driver
1239 #endif
1240
1241 #ifdef CONFIG_USB_CNS3XXX_EHCI
1242 #include "ehci-cns3xxx.c"
1243 #define PLATFORM_DRIVER cns3xxx_ehci_driver
1244 #endif
1245
1246 #ifdef CONFIG_ARCH_VT8500
1247 #include "ehci-vt8500.c"
1248 #define PLATFORM_DRIVER vt8500_ehci_driver
1249 #endif
1250
1251 #ifdef CONFIG_PLAT_SPEAR
1252 #include "ehci-spear.c"
1253 #define PLATFORM_DRIVER spear_ehci_hcd_driver
1254 #endif
1255
1256 #ifdef CONFIG_USB_EHCI_MSM
1257 #include "ehci-msm.c"
1258 #define PLATFORM_DRIVER ehci_msm_driver
1259 #endif
1260
1261 #ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
1262 #include "ehci-pmcmsp.c"
1263 #define PLATFORM_DRIVER ehci_hcd_msp_driver
1264 #endif
1265
1266 #ifdef CONFIG_USB_EHCI_TEGRA
1267 #include "ehci-tegra.c"
1268 #define PLATFORM_DRIVER tegra_ehci_driver
1269 #endif
1270
1271 #ifdef CONFIG_USB_EHCI_S5P
1272 #include "ehci-s5p.c"
1273 #define PLATFORM_DRIVER s5p_ehci_driver
1274 #endif
1275
1276 #ifdef CONFIG_USB_EHCI_ATH79
1277 #include "ehci-ath79.c"
1278 #define PLATFORM_DRIVER ehci_ath79_driver
1279 #endif
1280
1281 #ifdef CONFIG_SPARC_LEON
1282 #include "ehci-grlib.c"
1283 #define PLATFORM_DRIVER ehci_grlib_driver
1284 #endif
1285
1286 #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
1287 !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
1288 !defined(XILINX_OF_PLATFORM_DRIVER)
1289 #error "missing bus glue for ehci-hcd"
1290 #endif
1291
1292 static int __init ehci_hcd_init(void)
1293 {
1294 int retval = 0;
1295
1296 if (usb_disabled())
1297 return -ENODEV;
1298
1299 printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
1300 set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1301 if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
1302 test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
1303 printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
1304 " before uhci_hcd and ohci_hcd, not after\n");
1305
1306 pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
1307 hcd_name,
1308 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
1309 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
1310
1311 #ifdef DEBUG
1312 ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
1313 if (!ehci_debug_root) {
1314 retval = -ENOENT;
1315 goto err_debug;
1316 }
1317 #endif
1318
1319 #ifdef PLATFORM_DRIVER
1320 retval = platform_driver_register(&PLATFORM_DRIVER);
1321 if (retval < 0)
1322 goto clean0;
1323 #endif
1324
1325 #ifdef PCI_DRIVER
1326 retval = pci_register_driver(&PCI_DRIVER);
1327 if (retval < 0)
1328 goto clean1;
1329 #endif
1330
1331 #ifdef PS3_SYSTEM_BUS_DRIVER
1332 retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
1333 if (retval < 0)
1334 goto clean2;
1335 #endif
1336
1337 #ifdef OF_PLATFORM_DRIVER
1338 retval = platform_driver_register(&OF_PLATFORM_DRIVER);
1339 if (retval < 0)
1340 goto clean3;
1341 #endif
1342
1343 #ifdef XILINX_OF_PLATFORM_DRIVER
1344 retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
1345 if (retval < 0)
1346 goto clean4;
1347 #endif
1348 return retval;
1349
1350 #ifdef XILINX_OF_PLATFORM_DRIVER
1351 /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
1352 clean4:
1353 #endif
1354 #ifdef OF_PLATFORM_DRIVER
1355 platform_driver_unregister(&OF_PLATFORM_DRIVER);
1356 clean3:
1357 #endif
1358 #ifdef PS3_SYSTEM_BUS_DRIVER
1359 ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1360 clean2:
1361 #endif
1362 #ifdef PCI_DRIVER
1363 pci_unregister_driver(&PCI_DRIVER);
1364 clean1:
1365 #endif
1366 #ifdef PLATFORM_DRIVER
1367 platform_driver_unregister(&PLATFORM_DRIVER);
1368 clean0:
1369 #endif
1370 #ifdef DEBUG
1371 debugfs_remove(ehci_debug_root);
1372 ehci_debug_root = NULL;
1373 err_debug:
1374 #endif
1375 clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1376 return retval;
1377 }
1378 module_init(ehci_hcd_init);
1379
1380 static void __exit ehci_hcd_cleanup(void)
1381 {
1382 #ifdef XILINX_OF_PLATFORM_DRIVER
1383 platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
1384 #endif
1385 #ifdef OF_PLATFORM_DRIVER
1386 platform_driver_unregister(&OF_PLATFORM_DRIVER);
1387 #endif
1388 #ifdef PLATFORM_DRIVER
1389 platform_driver_unregister(&PLATFORM_DRIVER);
1390 #endif
1391 #ifdef PCI_DRIVER
1392 pci_unregister_driver(&PCI_DRIVER);
1393 #endif
1394 #ifdef PS3_SYSTEM_BUS_DRIVER
1395 ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1396 #endif
1397 #ifdef DEBUG
1398 debugfs_remove(ehci_debug_root);
1399 #endif
1400 clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1401 }
1402 module_exit(ehci_hcd_cleanup);
1403