2 * ISP1362 HCD (Host Controller Driver) for USB.
4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
6 * Derived from the SL811 HCD, rewritten for ISP116x.
7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11 * Copyright (C) 2004 David Brownell
15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
16 * accesses to the address and data register.
17 * The following timing options exist:
19 * 1. Configure your memory controller to add such delays if it can (the best)
20 * 2. Implement platform-specific delay function possibly
21 * combined with configuring the memory controller; see
22 * include/linux/usb_isp1362.h for more info.
23 * 3. Use ndelay (easiest, poorest).
25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26 * platform specific section of isp1362.h to select the appropriate variant.
28 * Also note that according to the Philips "ISP1362 Errata" document
29 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
30 * is reasserted (even with #CS deasserted) within 132ns after a
31 * write cycle to any controller register. If the hardware doesn't
32 * implement the recommended fix (gating the #WR with #CS) software
33 * must ensure that no further write cycle (not necessarily to the chip!)
34 * is issued by the CPU within this interval.
36 * For PXA25x this can be ensured by using VLIO with the maximum
37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
40 #ifdef CONFIG_USB_DEBUG
41 # define ISP1362_DEBUG
47 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49 * requests are carried out in separate frames. This will delay any SETUP
50 * packets until the start of the next frame so that this situation is
51 * unlikely to occur (and makes usbtest happy running with a PXA255 target
54 #undef BUGGY_PXA2XX_UDC_USBTEST
61 /* This enables a memory test on the ISP1362 chip memory to make sure the
62 * chip access timing is correct.
64 #undef CHIP_BUFFER_TEST
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/kernel.h>
69 #include <linux/delay.h>
70 #include <linux/ioport.h>
71 #include <linux/sched.h>
72 #include <linux/slab.h>
73 #include <linux/errno.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <linux/interrupt.h>
77 #include <linux/usb.h>
78 #include <linux/usb/isp1362.h>
79 #include <linux/usb/hcd.h>
80 #include <linux/platform_device.h>
83 #include <linux/bitmap.h>
84 #include <linux/prefetch.h>
87 #include <asm/byteorder.h>
88 #include <asm/unaligned.h>
92 module_param(dbg_level
, int, 0644);
94 module_param(dbg_level
, int, 0);
95 #define STUB_DEBUG_FILE
98 #include "../core/usb.h"
/* Driver identification strings and module metadata. */
#define DRIVER_VERSION	"2005-04-04"
#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* Name reported to the USB core for this HCD. */
static const char hcd_name[] = "isp1362-hcd";

/* Forward declarations: start/stop are defined later in this file. */
static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);
113 /*-------------------------------------------------------------------------*/
116 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
117 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
119 * We don't need a 'disable' counterpart, since interrupts will be disabled
120 * only by the interrupt handler.
122 static inline void isp1362_enable_int(struct isp1362_hcd
*isp1362_hcd
, u16 mask
)
124 if ((isp1362_hcd
->irqenb
| mask
) == isp1362_hcd
->irqenb
)
126 if (mask
& ~isp1362_hcd
->irqenb
)
127 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, mask
& ~isp1362_hcd
->irqenb
);
128 isp1362_hcd
->irqenb
|= mask
;
129 if (isp1362_hcd
->irq_active
)
131 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
134 /*-------------------------------------------------------------------------*/
136 static inline struct isp1362_ep_queue
*get_ptd_queue(struct isp1362_hcd
*isp1362_hcd
,
139 struct isp1362_ep_queue
*epq
= NULL
;
141 if (offset
< isp1362_hcd
->istl_queue
[1].buf_start
)
142 epq
= &isp1362_hcd
->istl_queue
[0];
143 else if (offset
< isp1362_hcd
->intl_queue
.buf_start
)
144 epq
= &isp1362_hcd
->istl_queue
[1];
145 else if (offset
< isp1362_hcd
->atl_queue
.buf_start
)
146 epq
= &isp1362_hcd
->intl_queue
;
147 else if (offset
< isp1362_hcd
->atl_queue
.buf_start
+
148 isp1362_hcd
->atl_queue
.buf_size
)
149 epq
= &isp1362_hcd
->atl_queue
;
152 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__
, offset
, epq
->name
);
154 pr_warning("%s: invalid PTD $%04x\n", __func__
, offset
);
159 static inline int get_ptd_offset(struct isp1362_ep_queue
*epq
, u8 index
)
163 if (index
* epq
->blk_size
> epq
->buf_size
) {
164 pr_warning("%s: Bad %s index %d(%d)\n", __func__
, epq
->name
, index
,
165 epq
->buf_size
/ epq
->blk_size
);
168 offset
= epq
->buf_start
+ index
* epq
->blk_size
;
169 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__
, epq
->name
, index
, offset
);
174 /*-------------------------------------------------------------------------*/
176 static inline u16
max_transfer_size(struct isp1362_ep_queue
*epq
, size_t size
,
179 u16 xfer_size
= min_t(size_t, MAX_XFER_SIZE
, size
);
181 xfer_size
= min_t(size_t, xfer_size
, epq
->buf_avail
* epq
->blk_size
- PTD_HEADER_SIZE
);
182 if (xfer_size
< size
&& xfer_size
% mps
)
183 xfer_size
-= xfer_size
% mps
;
188 static int claim_ptd_buffers(struct isp1362_ep_queue
*epq
,
189 struct isp1362_ep
*ep
, u16 len
)
191 int ptd_offset
= -EINVAL
;
192 int num_ptds
= ((len
+ PTD_HEADER_SIZE
- 1) / epq
->blk_size
) + 1;
195 BUG_ON(len
> epq
->buf_size
);
201 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__
,
202 epq
->name
, len
, epq
->blk_size
, num_ptds
, epq
->buf_map
, epq
->skip_map
);
203 BUG_ON(ep
->num_ptds
!= 0);
205 found
= bitmap_find_next_zero_area(&epq
->buf_map
, epq
->buf_count
, 0,
207 if (found
>= epq
->buf_count
)
210 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__
,
211 num_ptds
, found
, len
, (int)(epq
->blk_size
- PTD_HEADER_SIZE
));
212 ptd_offset
= get_ptd_offset(epq
, found
);
213 WARN_ON(ptd_offset
< 0);
214 ep
->ptd_offset
= ptd_offset
;
215 ep
->num_ptds
+= num_ptds
;
216 epq
->buf_avail
-= num_ptds
;
217 BUG_ON(epq
->buf_avail
> epq
->buf_count
);
218 ep
->ptd_index
= found
;
219 bitmap_set(&epq
->buf_map
, found
, num_ptds
);
220 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
221 __func__
, epq
->name
, ep
->ptd_index
, ep
->ptd_offset
,
222 epq
->buf_avail
, epq
->buf_count
, num_ptds
, epq
->buf_map
, epq
->skip_map
);
227 static inline void release_ptd_buffers(struct isp1362_ep_queue
*epq
, struct isp1362_ep
*ep
)
229 int last
= ep
->ptd_index
+ ep
->num_ptds
;
231 if (last
> epq
->buf_count
)
232 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
233 __func__
, ep
, ep
->num_req
, ep
->length
, epq
->name
, ep
->ptd_index
,
234 ep
->ptd_offset
, ep
->num_ptds
, epq
->buf_count
, epq
->buf_avail
,
235 epq
->buf_map
, epq
->skip_map
);
236 BUG_ON(last
> epq
->buf_count
);
238 bitmap_clear(&epq
->buf_map
, ep
->ptd_index
, ep
->num_ptds
);
239 bitmap_set(&epq
->skip_map
, ep
->ptd_index
, ep
->num_ptds
);
240 epq
->buf_avail
+= ep
->num_ptds
;
243 BUG_ON(epq
->buf_avail
> epq
->buf_count
);
244 BUG_ON(epq
->ptd_count
> epq
->buf_count
);
246 DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
248 ep
->ptd_offset
, ep
->num_ptds
, epq
->buf_avail
, epq
->buf_count
);
249 DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__
,
250 epq
->buf_map
, epq
->skip_map
);
253 ep
->ptd_offset
= -EINVAL
;
254 ep
->ptd_index
= -EINVAL
;
257 /*-------------------------------------------------------------------------*/
262 static void prepare_ptd(struct isp1362_hcd
*isp1362_hcd
, struct urb
*urb
,
263 struct isp1362_ep
*ep
, struct isp1362_ep_queue
*epq
,
270 size_t buf_len
= urb
->transfer_buffer_length
- urb
->actual_length
;
272 DBG(3, "%s: %s ep %p\n", __func__
, epq
->name
, ep
);
276 ep
->data
= (unsigned char *)urb
->transfer_buffer
+ urb
->actual_length
;
278 switch (ep
->nextpid
) {
280 toggle
= usb_gettoggle(urb
->dev
, ep
->epnum
, 0);
282 if (usb_pipecontrol(urb
->pipe
)) {
283 len
= min_t(size_t, ep
->maxpacket
, buf_len
);
284 } else if (usb_pipeisoc(urb
->pipe
)) {
285 len
= min_t(size_t, urb
->iso_frame_desc
[fno
].length
, MAX_XFER_SIZE
);
286 ep
->data
= urb
->transfer_buffer
+ urb
->iso_frame_desc
[fno
].offset
;
288 len
= max_transfer_size(epq
, buf_len
, ep
->maxpacket
);
289 DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__
, len
, ep
->maxpacket
,
293 toggle
= usb_gettoggle(urb
->dev
, ep
->epnum
, 1);
295 if (usb_pipecontrol(urb
->pipe
))
296 len
= min_t(size_t, ep
->maxpacket
, buf_len
);
297 else if (usb_pipeisoc(urb
->pipe
))
298 len
= min_t(size_t, urb
->iso_frame_desc
[0].length
, MAX_XFER_SIZE
);
300 len
= max_transfer_size(epq
, buf_len
, ep
->maxpacket
);
302 pr_info("%s: Sending ZERO packet: %d\n", __func__
,
303 urb
->transfer_flags
& URB_ZERO_PACKET
);
304 DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__
, len
, ep
->maxpacket
,
310 len
= sizeof(struct usb_ctrlrequest
);
311 DBG(1, "%s: SETUP len %d\n", __func__
, len
);
312 ep
->data
= urb
->setup_packet
;
317 dir
= (urb
->transfer_buffer_length
&& usb_pipein(urb
->pipe
)) ?
318 PTD_DIR_OUT
: PTD_DIR_IN
;
319 DBG(1, "%s: ACK len %d\n", __func__
, len
);
322 toggle
= dir
= len
= 0;
323 pr_err("%s@%d: ep->nextpid %02x\n", __func__
, __LINE__
, ep
->nextpid
);
331 ptd
->count
= PTD_CC_MSK
| PTD_ACTIVE_MSK
| PTD_TOGGLE(toggle
);
332 ptd
->mps
= PTD_MPS(ep
->maxpacket
) | PTD_SPD(urb
->dev
->speed
== USB_SPEED_LOW
) |
334 ptd
->len
= PTD_LEN(len
) | PTD_DIR(dir
);
335 ptd
->faddr
= PTD_FA(usb_pipedevice(urb
->pipe
));
337 if (usb_pipeint(urb
->pipe
)) {
338 ptd
->faddr
|= PTD_SF_INT(ep
->branch
);
339 ptd
->faddr
|= PTD_PR(ep
->interval
? __ffs(ep
->interval
) : 0);
341 if (usb_pipeisoc(urb
->pipe
))
342 ptd
->faddr
|= PTD_SF_ISO(fno
);
344 DBG(1, "%s: Finished\n", __func__
);
347 static void isp1362_write_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
348 struct isp1362_ep_queue
*epq
)
350 struct ptd
*ptd
= &ep
->ptd
;
351 int len
= PTD_GET_DIR(ptd
) == PTD_DIR_IN
? 0 : ep
->length
;
354 isp1362_write_buffer(isp1362_hcd
, ptd
, ep
->ptd_offset
, PTD_HEADER_SIZE
);
356 isp1362_write_buffer(isp1362_hcd
, ep
->data
,
357 ep
->ptd_offset
+ PTD_HEADER_SIZE
, len
);
360 dump_ptd_out_data(ptd
, ep
->data
);
363 static void isp1362_read_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
364 struct isp1362_ep_queue
*epq
)
366 struct ptd
*ptd
= &ep
->ptd
;
369 WARN_ON(list_empty(&ep
->active
));
370 BUG_ON(ep
->ptd_offset
< 0);
372 list_del_init(&ep
->active
);
373 DBG(1, "%s: ep %p removed from active list %p\n", __func__
, ep
, &epq
->active
);
376 isp1362_read_buffer(isp1362_hcd
, ptd
, ep
->ptd_offset
, PTD_HEADER_SIZE
);
378 act_len
= PTD_GET_COUNT(ptd
);
379 if (PTD_GET_DIR(ptd
) != PTD_DIR_IN
|| act_len
== 0)
381 if (act_len
> ep
->length
)
382 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__
, ep
,
383 ep
->ptd_offset
, act_len
, ep
->length
);
384 BUG_ON(act_len
> ep
->length
);
385 /* Only transfer the amount of data that has actually been overwritten
386 * in the chip buffer. We don't want any data that doesn't belong to the
387 * transfer to leak out of the chip to the callers transfer buffer!
390 isp1362_read_buffer(isp1362_hcd
, ep
->data
,
391 ep
->ptd_offset
+ PTD_HEADER_SIZE
, act_len
);
392 dump_ptd_in_data(ptd
, ep
->data
);
396 * INT PTDs will stay in the chip until data is available.
397 * This function will remove a PTD from the chip when the URB is dequeued.
398 * Must be called with the spinlock held and IRQs disabled
400 static void remove_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
)
404 struct isp1362_ep_queue
*epq
;
406 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__
, ep
, ep
->ptd_index
, ep
->ptd_offset
);
407 BUG_ON(ep
->ptd_offset
< 0);
409 epq
= get_ptd_queue(isp1362_hcd
, ep
->ptd_offset
);
412 /* put ep in remove_list for cleanup */
413 WARN_ON(!list_empty(&ep
->remove_list
));
414 list_add_tail(&ep
->remove_list
, &isp1362_hcd
->remove_list
);
415 /* let SOF interrupt handle the cleanup */
416 isp1362_enable_int(isp1362_hcd
, HCuPINT_SOF
);
418 index
= ep
->ptd_index
;
420 /* ISO queues don't have SKIP registers */
423 DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__
,
424 index
, ep
->ptd_offset
, epq
->skip_map
, 1 << index
);
426 /* prevent further processing of PTD (will be effective after next SOF) */
427 epq
->skip_map
|= 1 << index
;
428 if (epq
== &isp1362_hcd
->atl_queue
) {
429 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__
,
430 isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
), epq
->skip_map
);
431 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, epq
->skip_map
);
432 if (~epq
->skip_map
== 0)
433 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
434 } else if (epq
== &isp1362_hcd
->intl_queue
) {
435 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__
,
436 isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
), epq
->skip_map
);
437 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, epq
->skip_map
);
438 if (~epq
->skip_map
== 0)
439 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
444 Take done or failed requests out of schedule. Give back
447 static void finish_request(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
448 struct urb
*urb
, int status
)
449 __releases(isp1362_hcd
->lock
)
450 __acquires(isp1362_hcd
->lock
)
455 if (usb_pipecontrol(urb
->pipe
))
456 ep
->nextpid
= USB_PID_SETUP
;
458 URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__
,
459 ep
->num_req
, usb_pipedevice(urb
->pipe
),
460 usb_pipeendpoint(urb
->pipe
),
461 !usb_pipein(urb
->pipe
) ? "out" : "in",
462 usb_pipecontrol(urb
->pipe
) ? "ctrl" :
463 usb_pipeint(urb
->pipe
) ? "int" :
464 usb_pipebulk(urb
->pipe
) ? "bulk" :
466 urb
->actual_length
, urb
->transfer_buffer_length
,
467 !(urb
->transfer_flags
& URB_SHORT_NOT_OK
) ?
468 "short_ok" : "", urb
->status
);
471 usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd
), urb
);
472 spin_unlock(&isp1362_hcd
->lock
);
473 usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd
), urb
, status
);
474 spin_lock(&isp1362_hcd
->lock
);
476 /* take idle endpoints out of the schedule right away */
477 if (!list_empty(&ep
->hep
->urb_list
))
480 /* async deschedule */
481 if (!list_empty(&ep
->schedule
)) {
482 list_del_init(&ep
->schedule
);
488 /* periodic deschedule */
489 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep
->interval
,
490 ep
, ep
->branch
, ep
->load
,
491 isp1362_hcd
->load
[ep
->branch
],
492 isp1362_hcd
->load
[ep
->branch
] - ep
->load
);
493 isp1362_hcd
->load
[ep
->branch
] -= ep
->load
;
494 ep
->branch
= PERIODIC_SIZE
;
499 * Analyze transfer results, handle partial transfers and errors
501 static void postproc_ep(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
)
503 struct urb
*urb
= get_urb(ep
);
504 struct usb_device
*udev
;
508 int urbstat
= -EINPROGRESS
;
511 DBG(2, "%s: ep %p req %d\n", __func__
, ep
, ep
->num_req
);
515 cc
= PTD_GET_CC(ptd
);
516 if (cc
== PTD_NOTACCESSED
) {
517 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__
,
522 short_ok
= !(urb
->transfer_flags
& URB_SHORT_NOT_OK
);
523 len
= urb
->transfer_buffer_length
- urb
->actual_length
;
525 /* Data underrun is special. For allowed underrun
526 we clear the error and continue as normal. For
527 forbidden underrun we finish the DATA stage
528 immediately while for control transfer,
529 we do a STATUS stage.
531 if (cc
== PTD_DATAUNDERRUN
) {
533 DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
534 __func__
, ep
->num_req
, short_ok
? "" : "not_",
535 PTD_GET_COUNT(ptd
), ep
->maxpacket
, len
);
539 DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
540 __func__
, ep
->num_req
,
541 usb_pipein(urb
->pipe
) ? "IN" : "OUT", ep
->nextpid
,
542 short_ok
? "" : "not_",
543 PTD_GET_COUNT(ptd
), ep
->maxpacket
, len
);
544 /* save the data underrun error code for later and
545 * proceed with the status stage
547 urb
->actual_length
+= PTD_GET_COUNT(ptd
);
548 if (usb_pipecontrol(urb
->pipe
)) {
549 ep
->nextpid
= USB_PID_ACK
;
550 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
552 if (urb
->status
== -EINPROGRESS
)
553 urb
->status
= cc_to_error
[PTD_DATAUNDERRUN
];
555 usb_settoggle(udev
, ep
->epnum
, ep
->nextpid
== USB_PID_OUT
,
556 PTD_GET_TOGGLE(ptd
));
557 urbstat
= cc_to_error
[PTD_DATAUNDERRUN
];
563 if (cc
!= PTD_CC_NOERROR
) {
564 if (++ep
->error_count
>= 3 || cc
== PTD_CC_STALL
|| cc
== PTD_DATAOVERRUN
) {
565 urbstat
= cc_to_error
[cc
];
566 DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
567 __func__
, ep
->num_req
, ep
->nextpid
, urbstat
, cc
,
573 switch (ep
->nextpid
) {
575 if (PTD_GET_COUNT(ptd
) != ep
->length
)
576 pr_err("%s: count=%d len=%d\n", __func__
,
577 PTD_GET_COUNT(ptd
), ep
->length
);
578 BUG_ON(PTD_GET_COUNT(ptd
) != ep
->length
);
579 urb
->actual_length
+= ep
->length
;
580 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
581 usb_settoggle(udev
, ep
->epnum
, 1, PTD_GET_TOGGLE(ptd
));
582 if (urb
->actual_length
== urb
->transfer_buffer_length
) {
583 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__
,
584 ep
->num_req
, len
, ep
->maxpacket
, urbstat
);
585 if (usb_pipecontrol(urb
->pipe
)) {
586 DBG(3, "%s: req %d %s Wait for ACK\n", __func__
,
588 usb_pipein(urb
->pipe
) ? "IN" : "OUT");
589 ep
->nextpid
= USB_PID_ACK
;
591 if (len
% ep
->maxpacket
||
592 !(urb
->transfer_flags
& URB_ZERO_PACKET
)) {
594 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
595 __func__
, ep
->num_req
, usb_pipein(urb
->pipe
) ? "IN" : "OUT",
596 urbstat
, len
, ep
->maxpacket
, urb
->actual_length
);
602 len
= PTD_GET_COUNT(ptd
);
603 BUG_ON(len
> ep
->length
);
604 urb
->actual_length
+= len
;
605 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
606 usb_settoggle(udev
, ep
->epnum
, 0, PTD_GET_TOGGLE(ptd
));
607 /* if transfer completed or (allowed) data underrun */
608 if ((urb
->transfer_buffer_length
== urb
->actual_length
) ||
609 len
% ep
->maxpacket
) {
610 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__
,
611 ep
->num_req
, len
, ep
->maxpacket
, urbstat
);
612 if (usb_pipecontrol(urb
->pipe
)) {
613 DBG(3, "%s: req %d %s Wait for ACK\n", __func__
,
615 usb_pipein(urb
->pipe
) ? "IN" : "OUT");
616 ep
->nextpid
= USB_PID_ACK
;
619 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
620 __func__
, ep
->num_req
, usb_pipein(urb
->pipe
) ? "IN" : "OUT",
621 urbstat
, len
, ep
->maxpacket
, urb
->actual_length
);
626 if (urb
->transfer_buffer_length
== urb
->actual_length
) {
627 ep
->nextpid
= USB_PID_ACK
;
628 } else if (usb_pipeout(urb
->pipe
)) {
629 usb_settoggle(udev
, 0, 1, 1);
630 ep
->nextpid
= USB_PID_OUT
;
632 usb_settoggle(udev
, 0, 0, 1);
633 ep
->nextpid
= USB_PID_IN
;
637 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__
, ep
->num_req
,
639 WARN_ON(urbstat
!= -EINPROGRESS
);
648 if (urbstat
!= -EINPROGRESS
) {
649 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__
,
650 ep
, ep
->num_req
, urb
, urbstat
);
651 finish_request(isp1362_hcd
, ep
, urb
, urbstat
);
655 static void finish_unlinks(struct isp1362_hcd
*isp1362_hcd
)
657 struct isp1362_ep
*ep
;
658 struct isp1362_ep
*tmp
;
660 list_for_each_entry_safe(ep
, tmp
, &isp1362_hcd
->remove_list
, remove_list
) {
661 struct isp1362_ep_queue
*epq
=
662 get_ptd_queue(isp1362_hcd
, ep
->ptd_offset
);
663 int index
= ep
->ptd_index
;
667 DBG(1, "%s: remove PTD[%d] $%04x\n", __func__
, index
, ep
->ptd_offset
);
668 BUG_ON(ep
->num_ptds
== 0);
669 release_ptd_buffers(epq
, ep
);
671 if (!list_empty(&ep
->hep
->urb_list
)) {
672 struct urb
*urb
= get_urb(ep
);
674 DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__
,
676 finish_request(isp1362_hcd
, ep
, urb
, -ESHUTDOWN
);
678 WARN_ON(list_empty(&ep
->active
));
679 if (!list_empty(&ep
->active
)) {
680 list_del_init(&ep
->active
);
681 DBG(1, "%s: ep %p removed from active list\n", __func__
, ep
);
683 list_del_init(&ep
->remove_list
);
684 DBG(1, "%s: ep %p removed from remove_list\n", __func__
, ep
);
686 DBG(1, "%s: Done\n", __func__
);
689 static inline void enable_atl_transfers(struct isp1362_hcd
*isp1362_hcd
, int count
)
692 if (count
< isp1362_hcd
->atl_queue
.ptd_count
)
693 isp1362_write_reg16(isp1362_hcd
, HCATLDTC
, count
);
694 isp1362_enable_int(isp1362_hcd
, HCuPINT_ATL
);
695 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, isp1362_hcd
->atl_queue
.skip_map
);
696 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
698 isp1362_enable_int(isp1362_hcd
, HCuPINT_SOF
);
701 static inline void enable_intl_transfers(struct isp1362_hcd
*isp1362_hcd
)
703 isp1362_enable_int(isp1362_hcd
, HCuPINT_INTL
);
704 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
705 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, isp1362_hcd
->intl_queue
.skip_map
);
708 static inline void enable_istl_transfers(struct isp1362_hcd
*isp1362_hcd
, int flip
)
710 isp1362_enable_int(isp1362_hcd
, flip
? HCuPINT_ISTL1
: HCuPINT_ISTL0
);
711 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, flip
?
712 HCBUFSTAT_ISTL1_FULL
: HCBUFSTAT_ISTL0_FULL
);
715 static int submit_req(struct isp1362_hcd
*isp1362_hcd
, struct urb
*urb
,
716 struct isp1362_ep
*ep
, struct isp1362_ep_queue
*epq
)
718 int index
= epq
->free_ptd
;
720 prepare_ptd(isp1362_hcd
, urb
, ep
, epq
, 0);
721 index
= claim_ptd_buffers(epq
, ep
, ep
->length
);
722 if (index
== -ENOMEM
) {
723 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__
,
724 ep
->num_req
, epq
->name
, ep
->num_ptds
, epq
->buf_map
, epq
->skip_map
);
726 } else if (index
== -EOVERFLOW
) {
727 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
728 __func__
, ep
->num_req
, ep
->length
, epq
->name
, ep
->num_ptds
,
729 epq
->buf_map
, epq
->skip_map
);
733 list_add_tail(&ep
->active
, &epq
->active
);
734 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__
,
735 ep
, ep
->num_req
, ep
->length
, &epq
->active
);
736 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__
, epq
->name
,
737 ep
->ptd_offset
, ep
, ep
->num_req
);
738 isp1362_write_ptd(isp1362_hcd
, ep
, epq
);
739 __clear_bit(ep
->ptd_index
, &epq
->skip_map
);
744 static void start_atl_transfers(struct isp1362_hcd
*isp1362_hcd
)
747 struct isp1362_ep_queue
*epq
= &isp1362_hcd
->atl_queue
;
748 struct isp1362_ep
*ep
;
751 if (atomic_read(&epq
->finishing
)) {
752 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
756 list_for_each_entry(ep
, &isp1362_hcd
->async
, schedule
) {
757 struct urb
*urb
= get_urb(ep
);
760 if (!list_empty(&ep
->active
)) {
761 DBG(2, "%s: Skipping active %s ep %p\n", __func__
, epq
->name
, ep
);
765 DBG(1, "%s: Processing %s ep %p req %d\n", __func__
, epq
->name
,
768 ret
= submit_req(isp1362_hcd
, urb
, ep
, epq
);
769 if (ret
== -ENOMEM
) {
772 } else if (ret
== -EOVERFLOW
) {
776 #ifdef BUGGY_PXA2XX_UDC_USBTEST
777 defer
= ep
->nextpid
== USB_PID_SETUP
;
782 /* Avoid starving of endpoints */
783 if (isp1362_hcd
->async
.next
!= isp1362_hcd
->async
.prev
) {
784 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__
, ptd_count
);
785 list_move(&isp1362_hcd
->async
, isp1362_hcd
->async
.next
);
787 if (ptd_count
|| defer
)
788 enable_atl_transfers(isp1362_hcd
, defer
? 0 : ptd_count
);
790 epq
->ptd_count
+= ptd_count
;
791 if (epq
->ptd_count
> epq
->stat_maxptds
) {
792 epq
->stat_maxptds
= epq
->ptd_count
;
793 DBG(0, "%s: max_ptds: %d\n", __func__
, epq
->stat_maxptds
);
797 static void start_intl_transfers(struct isp1362_hcd
*isp1362_hcd
)
800 struct isp1362_ep_queue
*epq
= &isp1362_hcd
->intl_queue
;
801 struct isp1362_ep
*ep
;
803 if (atomic_read(&epq
->finishing
)) {
804 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
808 list_for_each_entry(ep
, &isp1362_hcd
->periodic
, schedule
) {
809 struct urb
*urb
= get_urb(ep
);
812 if (!list_empty(&ep
->active
)) {
813 DBG(1, "%s: Skipping active %s ep %p\n", __func__
,
818 DBG(1, "%s: Processing %s ep %p req %d\n", __func__
,
819 epq
->name
, ep
, ep
->num_req
);
820 ret
= submit_req(isp1362_hcd
, urb
, ep
, epq
);
823 else if (ret
== -EOVERFLOW
)
829 static int last_count
;
831 if (ptd_count
!= last_count
) {
832 DBG(0, "%s: ptd_count: %d\n", __func__
, ptd_count
);
833 last_count
= ptd_count
;
835 enable_intl_transfers(isp1362_hcd
);
838 epq
->ptd_count
+= ptd_count
;
839 if (epq
->ptd_count
> epq
->stat_maxptds
)
840 epq
->stat_maxptds
= epq
->ptd_count
;
843 static inline int next_ptd(struct isp1362_ep_queue
*epq
, struct isp1362_ep
*ep
)
845 u16 ptd_offset
= ep
->ptd_offset
;
846 int num_ptds
= (ep
->length
+ PTD_HEADER_SIZE
+ (epq
->blk_size
- 1)) / epq
->blk_size
;
848 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__
, ptd_offset
,
849 ep
->length
, num_ptds
, epq
->blk_size
, ptd_offset
+ num_ptds
* epq
->blk_size
);
851 ptd_offset
+= num_ptds
* epq
->blk_size
;
852 if (ptd_offset
< epq
->buf_start
+ epq
->buf_size
)
858 static void start_iso_transfers(struct isp1362_hcd
*isp1362_hcd
)
861 int flip
= isp1362_hcd
->istl_flip
;
862 struct isp1362_ep_queue
*epq
;
864 struct isp1362_ep
*ep
;
865 struct isp1362_ep
*tmp
;
866 u16 fno
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
869 epq
= &isp1362_hcd
->istl_queue
[flip
];
870 if (atomic_read(&epq
->finishing
)) {
871 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
875 if (!list_empty(&epq
->active
))
878 ptd_offset
= epq
->buf_start
;
879 list_for_each_entry_safe(ep
, tmp
, &isp1362_hcd
->isoc
, schedule
) {
880 struct urb
*urb
= get_urb(ep
);
881 s16 diff
= fno
- (u16
)urb
->start_frame
;
883 DBG(1, "%s: Processing %s ep %p\n", __func__
, epq
->name
, ep
);
885 if (diff
> urb
->number_of_packets
) {
886 /* time frame for this URB has elapsed */
887 finish_request(isp1362_hcd
, ep
, urb
, -EOVERFLOW
);
889 } else if (diff
< -1) {
890 /* URB is not due in this frame or the next one.
891 * Comparing with '-1' instead of '0' accounts for double
892 * buffering in the ISP1362 which enables us to queue the PTD
893 * one frame ahead of time
895 } else if (diff
== -1) {
896 /* submit PTD's that are due in the next frame */
897 prepare_ptd(isp1362_hcd
, urb
, ep
, epq
, fno
);
898 if (ptd_offset
+ PTD_HEADER_SIZE
+ ep
->length
>
899 epq
->buf_start
+ epq
->buf_size
) {
900 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
901 __func__
, ep
->length
);
904 ep
->ptd_offset
= ptd_offset
;
905 list_add_tail(&ep
->active
, &epq
->active
);
907 ptd_offset
= next_ptd(epq
, ep
);
908 if (ptd_offset
< 0) {
909 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__
,
910 ep
->num_req
, epq
->name
);
915 list_for_each_entry(ep
, &epq
->active
, active
) {
916 if (epq
->active
.next
== &ep
->active
)
917 ep
->ptd
.mps
|= PTD_LAST_MSK
;
918 isp1362_write_ptd(isp1362_hcd
, ep
, epq
);
923 enable_istl_transfers(isp1362_hcd
, flip
);
925 epq
->ptd_count
+= ptd_count
;
926 if (epq
->ptd_count
> epq
->stat_maxptds
)
927 epq
->stat_maxptds
= epq
->ptd_count
;
929 /* check, whether the second ISTL buffer may also be filled */
930 if (!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
931 (flip
? HCBUFSTAT_ISTL0_FULL
: HCBUFSTAT_ISTL1_FULL
))) {
939 static void finish_transfers(struct isp1362_hcd
*isp1362_hcd
, unsigned long done_map
,
940 struct isp1362_ep_queue
*epq
)
942 struct isp1362_ep
*ep
;
943 struct isp1362_ep
*tmp
;
945 if (list_empty(&epq
->active
)) {
946 DBG(1, "%s: Nothing to do for %s queue\n", __func__
, epq
->name
);
950 DBG(1, "%s: Finishing %s transfers %08lx\n", __func__
, epq
->name
, done_map
);
952 atomic_inc(&epq
->finishing
);
953 list_for_each_entry_safe(ep
, tmp
, &epq
->active
, active
) {
954 int index
= ep
->ptd_index
;
956 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__
, epq
->name
,
957 index
, ep
->ptd_offset
);
960 if (__test_and_clear_bit(index
, &done_map
)) {
961 isp1362_read_ptd(isp1362_hcd
, ep
, epq
);
962 epq
->free_ptd
= index
;
963 BUG_ON(ep
->num_ptds
== 0);
964 release_ptd_buffers(epq
, ep
);
966 DBG(1, "%s: ep %p req %d removed from active list\n", __func__
,
968 if (!list_empty(&ep
->remove_list
)) {
969 list_del_init(&ep
->remove_list
);
970 DBG(1, "%s: ep %p removed from remove list\n", __func__
, ep
);
972 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__
, epq
->name
,
974 postproc_ep(isp1362_hcd
, ep
);
980 pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__
, done_map
,
982 atomic_dec(&epq
->finishing
);
985 static void finish_iso_transfers(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep_queue
*epq
)
987 struct isp1362_ep
*ep
;
988 struct isp1362_ep
*tmp
;
990 if (list_empty(&epq
->active
)) {
991 DBG(1, "%s: Nothing to do for %s queue\n", __func__
, epq
->name
);
995 DBG(1, "%s: Finishing %s transfers\n", __func__
, epq
->name
);
997 atomic_inc(&epq
->finishing
);
998 list_for_each_entry_safe(ep
, tmp
, &epq
->active
, active
) {
999 DBG(1, "%s: Checking PTD $%04x\n", __func__
, ep
->ptd_offset
);
1001 isp1362_read_ptd(isp1362_hcd
, ep
, epq
);
1002 DBG(1, "%s: Postprocessing %s ep %p\n", __func__
, epq
->name
, ep
);
1003 postproc_ep(isp1362_hcd
, ep
);
1005 WARN_ON(epq
->blk_size
!= 0);
1006 atomic_dec(&epq
->finishing
);
1009 static irqreturn_t
isp1362_irq(struct usb_hcd
*hcd
)
1012 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1016 spin_lock(&isp1362_hcd
->lock
);
1018 BUG_ON(isp1362_hcd
->irq_active
++);
1020 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
1022 irqstat
= isp1362_read_reg16(isp1362_hcd
, HCuPINT
);
1023 DBG(3, "%s: got IRQ %04x:%04x\n", __func__
, irqstat
, isp1362_hcd
->irqenb
);
1025 /* only handle interrupts that are currently enabled */
1026 irqstat
&= isp1362_hcd
->irqenb
;
1027 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, irqstat
);
1030 if (irqstat
& HCuPINT_SOF
) {
1031 isp1362_hcd
->irqenb
&= ~HCuPINT_SOF
;
1032 isp1362_hcd
->irq_stat
[ISP1362_INT_SOF
]++;
1034 svc_mask
&= ~HCuPINT_SOF
;
1035 DBG(3, "%s: SOF\n", __func__
);
1036 isp1362_hcd
->fmindex
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1037 if (!list_empty(&isp1362_hcd
->remove_list
))
1038 finish_unlinks(isp1362_hcd
);
1039 if (!list_empty(&isp1362_hcd
->async
) && !(irqstat
& HCuPINT_ATL
)) {
1040 if (list_empty(&isp1362_hcd
->atl_queue
.active
)) {
1041 start_atl_transfers(isp1362_hcd
);
1043 isp1362_enable_int(isp1362_hcd
, HCuPINT_ATL
);
1044 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
,
1045 isp1362_hcd
->atl_queue
.skip_map
);
1046 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
1051 if (irqstat
& HCuPINT_ISTL0
) {
1052 isp1362_hcd
->irq_stat
[ISP1362_INT_ISTL0
]++;
1054 svc_mask
&= ~HCuPINT_ISTL0
;
1055 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ISTL0_FULL
);
1056 DBG(1, "%s: ISTL0\n", __func__
);
1057 WARN_ON((int)!!isp1362_hcd
->istl_flip
);
1058 WARN_ON(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1059 HCBUFSTAT_ISTL0_ACTIVE
);
1060 WARN_ON(!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1061 HCBUFSTAT_ISTL0_DONE
));
1062 isp1362_hcd
->irqenb
&= ~HCuPINT_ISTL0
;
1065 if (irqstat
& HCuPINT_ISTL1
) {
1066 isp1362_hcd
->irq_stat
[ISP1362_INT_ISTL1
]++;
1068 svc_mask
&= ~HCuPINT_ISTL1
;
1069 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ISTL1_FULL
);
1070 DBG(1, "%s: ISTL1\n", __func__
);
1071 WARN_ON(!(int)isp1362_hcd
->istl_flip
);
1072 WARN_ON(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1073 HCBUFSTAT_ISTL1_ACTIVE
);
1074 WARN_ON(!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1075 HCBUFSTAT_ISTL1_DONE
));
1076 isp1362_hcd
->irqenb
&= ~HCuPINT_ISTL1
;
1079 if (irqstat
& (HCuPINT_ISTL0
| HCuPINT_ISTL1
)) {
1080 WARN_ON((irqstat
& (HCuPINT_ISTL0
| HCuPINT_ISTL1
)) ==
1081 (HCuPINT_ISTL0
| HCuPINT_ISTL1
));
1082 finish_iso_transfers(isp1362_hcd
,
1083 &isp1362_hcd
->istl_queue
[isp1362_hcd
->istl_flip
]);
1084 start_iso_transfers(isp1362_hcd
);
1085 isp1362_hcd
->istl_flip
= 1 - isp1362_hcd
->istl_flip
;
1088 if (irqstat
& HCuPINT_INTL
) {
1089 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
);
1090 u32 skip_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
);
1091 isp1362_hcd
->irq_stat
[ISP1362_INT_INTL
]++;
1093 DBG(2, "%s: INTL\n", __func__
);
1095 svc_mask
&= ~HCuPINT_INTL
;
1097 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, skip_map
| done_map
);
1098 if (~(done_map
| skip_map
) == 0)
1099 /* All PTDs are finished, disable INTL processing entirely */
1100 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
1105 DBG(3, "%s: INTL done_map %08x\n", __func__
, done_map
);
1106 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->intl_queue
);
1107 start_intl_transfers(isp1362_hcd
);
1111 if (irqstat
& HCuPINT_ATL
) {
1112 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCATLDONE
);
1113 u32 skip_map
= isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
);
1114 isp1362_hcd
->irq_stat
[ISP1362_INT_ATL
]++;
1116 DBG(2, "%s: ATL\n", __func__
);
1118 svc_mask
&= ~HCuPINT_ATL
;
1120 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, skip_map
| done_map
);
1121 if (~(done_map
| skip_map
) == 0)
1122 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
1124 DBG(3, "%s: ATL done_map %08x\n", __func__
, done_map
);
1125 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->atl_queue
);
1126 start_atl_transfers(isp1362_hcd
);
1131 if (irqstat
& HCuPINT_OPR
) {
1132 u32 intstat
= isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
);
1133 isp1362_hcd
->irq_stat
[ISP1362_INT_OPR
]++;
1135 svc_mask
&= ~HCuPINT_OPR
;
1136 DBG(2, "%s: OPR %08x:%08x\n", __func__
, intstat
, isp1362_hcd
->intenb
);
1137 intstat
&= isp1362_hcd
->intenb
;
1138 if (intstat
& OHCI_INTR_UE
) {
1139 pr_err("Unrecoverable error\n");
1140 /* FIXME: do here reset or cleanup or whatever */
1142 if (intstat
& OHCI_INTR_RHSC
) {
1143 isp1362_hcd
->rhstatus
= isp1362_read_reg32(isp1362_hcd
, HCRHSTATUS
);
1144 isp1362_hcd
->rhport
[0] = isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
);
1145 isp1362_hcd
->rhport
[1] = isp1362_read_reg32(isp1362_hcd
, HCRHPORT2
);
1147 if (intstat
& OHCI_INTR_RD
) {
1148 pr_info("%s: RESUME DETECTED\n", __func__
);
1149 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1150 usb_hcd_resume_root_hub(hcd
);
1152 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
, intstat
);
1153 irqstat
&= ~HCuPINT_OPR
;
1157 if (irqstat
& HCuPINT_SUSP
) {
1158 isp1362_hcd
->irq_stat
[ISP1362_INT_SUSP
]++;
1160 svc_mask
&= ~HCuPINT_SUSP
;
1162 pr_info("%s: SUSPEND IRQ\n", __func__
);
1165 if (irqstat
& HCuPINT_CLKRDY
) {
1166 isp1362_hcd
->irq_stat
[ISP1362_INT_CLKRDY
]++;
1168 isp1362_hcd
->irqenb
&= ~HCuPINT_CLKRDY
;
1169 svc_mask
&= ~HCuPINT_CLKRDY
;
1170 pr_info("%s: CLKRDY IRQ\n", __func__
);
1174 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__
, svc_mask
);
1176 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
1177 isp1362_hcd
->irq_active
--;
1178 spin_unlock(&isp1362_hcd
->lock
);
1180 return IRQ_RETVAL(handled
);
1183 /*-------------------------------------------------------------------------*/
1185 #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
1186 static int balance(struct isp1362_hcd
*isp1362_hcd
, u16 interval
, u16 load
)
1188 int i
, branch
= -ENOSPC
;
1190 /* search for the least loaded schedule branch of that interval
1191 * which has enough bandwidth left unreserved.
1193 for (i
= 0; i
< interval
; i
++) {
1194 if (branch
< 0 || isp1362_hcd
->load
[branch
] > isp1362_hcd
->load
[i
]) {
1197 for (j
= i
; j
< PERIODIC_SIZE
; j
+= interval
) {
1198 if ((isp1362_hcd
->load
[j
] + load
) > MAX_PERIODIC_LOAD
) {
1199 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__
,
1200 load
, j
, isp1362_hcd
->load
[j
], MAX_PERIODIC_LOAD
);
1204 if (j
< PERIODIC_SIZE
)
/* NB! ALL the code above this point runs with isp1362_hcd->lock
 * held, irqs off
 */

/*-------------------------------------------------------------------------*/
1218 static int isp1362_urb_enqueue(struct usb_hcd
*hcd
,
1222 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1223 struct usb_device
*udev
= urb
->dev
;
1224 unsigned int pipe
= urb
->pipe
;
1225 int is_out
= !usb_pipein(pipe
);
1226 int type
= usb_pipetype(pipe
);
1227 int epnum
= usb_pipeendpoint(pipe
);
1228 struct usb_host_endpoint
*hep
= urb
->ep
;
1229 struct isp1362_ep
*ep
= NULL
;
1230 unsigned long flags
;
1233 DBG(3, "%s: urb %p\n", __func__
, urb
);
1235 if (type
== PIPE_ISOCHRONOUS
) {
1236 pr_err("Isochronous transfers not supported\n");
1240 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__
,
1241 usb_pipedevice(pipe
), epnum
,
1242 is_out
? "out" : "in",
1243 usb_pipecontrol(pipe
) ? "ctrl" :
1244 usb_pipeint(pipe
) ? "int" :
1245 usb_pipebulk(pipe
) ? "bulk" :
1247 urb
->transfer_buffer_length
,
1248 (urb
->transfer_flags
& URB_ZERO_PACKET
) ? "ZERO_PACKET " : "",
1249 !(urb
->transfer_flags
& URB_SHORT_NOT_OK
) ?
1252 /* avoid all allocations within spinlocks: request or endpoint */
1254 ep
= kzalloc(sizeof *ep
, mem_flags
);
1258 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1260 /* don't submit to a dead or disabled port */
1261 if (!((isp1362_hcd
->rhport
[0] | isp1362_hcd
->rhport
[1]) &
1262 USB_PORT_STAT_ENABLE
) ||
1263 !HC_IS_RUNNING(hcd
->state
)) {
1266 goto fail_not_linked
;
1269 retval
= usb_hcd_link_urb_to_ep(hcd
, urb
);
1272 goto fail_not_linked
;
1278 INIT_LIST_HEAD(&ep
->schedule
);
1279 INIT_LIST_HEAD(&ep
->active
);
1280 INIT_LIST_HEAD(&ep
->remove_list
);
1281 ep
->udev
= usb_get_dev(udev
);
1284 ep
->maxpacket
= usb_maxpacket(udev
, urb
->pipe
, is_out
);
1285 ep
->ptd_offset
= -EINVAL
;
1286 ep
->ptd_index
= -EINVAL
;
1287 usb_settoggle(udev
, epnum
, is_out
, 0);
1289 if (type
== PIPE_CONTROL
)
1290 ep
->nextpid
= USB_PID_SETUP
;
1292 ep
->nextpid
= USB_PID_OUT
;
1294 ep
->nextpid
= USB_PID_IN
;
1297 case PIPE_ISOCHRONOUS
:
1298 case PIPE_INTERRUPT
:
1299 if (urb
->interval
> PERIODIC_SIZE
)
1300 urb
->interval
= PERIODIC_SIZE
;
1301 ep
->interval
= urb
->interval
;
1302 ep
->branch
= PERIODIC_SIZE
;
1303 ep
->load
= usb_calc_bus_time(udev
->speed
, !is_out
,
1304 (type
== PIPE_ISOCHRONOUS
),
1305 usb_maxpacket(udev
, pipe
, is_out
)) / 1000;
1310 ep
->num_req
= isp1362_hcd
->req_serial
++;
1312 /* maybe put endpoint into schedule */
1316 if (list_empty(&ep
->schedule
)) {
1317 DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1318 __func__
, ep
, ep
->num_req
);
1319 list_add_tail(&ep
->schedule
, &isp1362_hcd
->async
);
1322 case PIPE_ISOCHRONOUS
:
1323 case PIPE_INTERRUPT
:
1324 urb
->interval
= ep
->interval
;
1326 /* urb submitted for already existing EP */
1327 if (ep
->branch
< PERIODIC_SIZE
)
1330 retval
= balance(isp1362_hcd
, ep
->interval
, ep
->load
);
1332 pr_err("%s: balance returned %d\n", __func__
, retval
);
1335 ep
->branch
= retval
;
1337 isp1362_hcd
->fmindex
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1338 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1339 __func__
, isp1362_hcd
->fmindex
, ep
->branch
,
1340 ((isp1362_hcd
->fmindex
+ PERIODIC_SIZE
- 1) &
1341 ~(PERIODIC_SIZE
- 1)) + ep
->branch
,
1342 (isp1362_hcd
->fmindex
& (PERIODIC_SIZE
- 1)) + ep
->branch
);
1344 if (list_empty(&ep
->schedule
)) {
1345 if (type
== PIPE_ISOCHRONOUS
) {
1346 u16 frame
= isp1362_hcd
->fmindex
;
1348 frame
+= max_t(u16
, 8, ep
->interval
);
1349 frame
&= ~(ep
->interval
- 1);
1350 frame
|= ep
->branch
;
1351 if (frame_before(frame
, isp1362_hcd
->fmindex
))
1352 frame
+= ep
->interval
;
1353 urb
->start_frame
= frame
;
1355 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__
, ep
);
1356 list_add_tail(&ep
->schedule
, &isp1362_hcd
->isoc
);
1358 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__
, ep
);
1359 list_add_tail(&ep
->schedule
, &isp1362_hcd
->periodic
);
1362 DBG(1, "%s: ep %p already scheduled\n", __func__
, ep
);
1364 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__
,
1365 ep
->load
/ ep
->interval
, isp1362_hcd
->load
[ep
->branch
],
1366 isp1362_hcd
->load
[ep
->branch
] + ep
->load
);
1367 isp1362_hcd
->load
[ep
->branch
] += ep
->load
;
1371 ALIGNSTAT(isp1362_hcd
, urb
->transfer_buffer
);
1376 start_atl_transfers(isp1362_hcd
);
1378 case PIPE_INTERRUPT
:
1379 start_intl_transfers(isp1362_hcd
);
1381 case PIPE_ISOCHRONOUS
:
1382 start_iso_transfers(isp1362_hcd
);
1389 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
1393 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1395 DBG(0, "%s: urb %p failed with %d\n", __func__
, urb
, retval
);
1399 static int isp1362_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
)
1401 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1402 struct usb_host_endpoint
*hep
;
1403 unsigned long flags
;
1404 struct isp1362_ep
*ep
;
1407 DBG(3, "%s: urb %p\n", __func__
, urb
);
1409 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1410 retval
= usb_hcd_check_unlink_urb(hcd
, urb
, status
);
1417 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1423 /* In front of queue? */
1424 if (ep
->hep
->urb_list
.next
== &urb
->urb_list
) {
1425 if (!list_empty(&ep
->active
)) {
1426 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__
,
1427 urb
, ep
, ep
->num_req
, ep
->ptd_index
, ep
->ptd_offset
);
1428 /* disable processing and queue PTD for removal */
1429 remove_ptd(isp1362_hcd
, ep
);
1434 DBG(1, "%s: Finishing ep %p req %d\n", __func__
, ep
,
1436 finish_request(isp1362_hcd
, ep
, urb
, status
);
1438 DBG(1, "%s: urb %p active; wait4irq\n", __func__
, urb
);
1440 pr_warning("%s: No EP in URB %p\n", __func__
, urb
);
1444 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1446 DBG(3, "%s: exit\n", __func__
);
1451 static void isp1362_endpoint_disable(struct usb_hcd
*hcd
, struct usb_host_endpoint
*hep
)
1453 struct isp1362_ep
*ep
= hep
->hcpriv
;
1454 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1455 unsigned long flags
;
1457 DBG(1, "%s: ep %p\n", __func__
, ep
);
1460 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1461 if (!list_empty(&hep
->urb_list
)) {
1462 if (!list_empty(&ep
->active
) && list_empty(&ep
->remove_list
)) {
1463 DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__
,
1464 ep
, ep
->num_req
, ep
->ptd_index
, ep
->ptd_offset
);
1465 remove_ptd(isp1362_hcd
, ep
);
1466 pr_info("%s: Waiting for Interrupt to clean up\n", __func__
);
1469 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1470 /* Wait for interrupt to clear out active list */
1471 while (!list_empty(&ep
->active
))
1474 DBG(1, "%s: Freeing EP %p\n", __func__
, ep
);
1476 usb_put_dev(ep
->udev
);
1481 static int isp1362_get_frame(struct usb_hcd
*hcd
)
1483 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1485 unsigned long flags
;
1487 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1488 fmnum
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1489 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1494 /*-------------------------------------------------------------------------*/
1496 /* Adapted from ohci-hub.c */
1497 static int isp1362_hub_status_data(struct usb_hcd
*hcd
, char *buf
)
1499 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1500 int ports
, i
, changed
= 0;
1501 unsigned long flags
;
1503 if (!HC_IS_RUNNING(hcd
->state
))
1506 /* Report no status change now, if we are scheduled to be
1508 if (timer_pending(&hcd
->rh_timer
))
1511 ports
= isp1362_hcd
->rhdesca
& RH_A_NDP
;
1514 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1516 if (isp1362_hcd
->rhstatus
& (RH_HS_LPSC
| RH_HS_OCIC
))
1517 buf
[0] = changed
= 1;
1521 for (i
= 0; i
< ports
; i
++) {
1522 u32 status
= isp1362_hcd
->rhport
[i
];
1524 if (status
& (RH_PS_CSC
| RH_PS_PESC
| RH_PS_PSSC
|
1525 RH_PS_OCIC
| RH_PS_PRSC
)) {
1527 buf
[0] |= 1 << (i
+ 1);
1531 if (!(status
& RH_PS_CCS
))
1534 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1538 static void isp1362_hub_descriptor(struct isp1362_hcd
*isp1362_hcd
,
1539 struct usb_hub_descriptor
*desc
)
1541 u32 reg
= isp1362_hcd
->rhdesca
;
1543 DBG(3, "%s: enter\n", __func__
);
1545 desc
->bDescriptorType
= 0x29;
1546 desc
->bDescLength
= 9;
1547 desc
->bHubContrCurrent
= 0;
1548 desc
->bNbrPorts
= reg
& 0x3;
1549 /* Power switching, device type, overcurrent. */
1550 desc
->wHubCharacteristics
= cpu_to_le16((reg
>> 8) & 0x1f);
1551 DBG(0, "%s: hubcharacteristics = %02x\n", __func__
, cpu_to_le16((reg
>> 8) & 0x1f));
1552 desc
->bPwrOn2PwrGood
= (reg
>> 24) & 0xff;
1553 /* ports removable, and legacy PortPwrCtrlMask */
1554 desc
->u
.hs
.DeviceRemovable
[0] = desc
->bNbrPorts
== 1 ? 1 << 1 : 3 << 1;
1555 desc
->u
.hs
.DeviceRemovable
[1] = ~0;
1557 DBG(3, "%s: exit\n", __func__
);
1560 /* Adapted from ohci-hub.c */
1561 static int isp1362_hub_control(struct usb_hcd
*hcd
, u16 typeReq
, u16 wValue
,
1562 u16 wIndex
, char *buf
, u16 wLength
)
1564 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1566 unsigned long flags
;
1568 int ports
= isp1362_hcd
->rhdesca
& RH_A_NDP
;
1572 case ClearHubFeature
:
1573 DBG(0, "ClearHubFeature: ");
1575 case C_HUB_OVER_CURRENT
:
1576 _DBG(0, "C_HUB_OVER_CURRENT\n");
1577 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1578 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_OCIC
);
1579 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1580 case C_HUB_LOCAL_POWER
:
1581 _DBG(0, "C_HUB_LOCAL_POWER\n");
1588 DBG(0, "SetHubFeature: ");
1590 case C_HUB_OVER_CURRENT
:
1591 case C_HUB_LOCAL_POWER
:
1592 _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1598 case GetHubDescriptor
:
1599 DBG(0, "GetHubDescriptor\n");
1600 isp1362_hub_descriptor(isp1362_hcd
, (struct usb_hub_descriptor
*)buf
);
1603 DBG(0, "GetHubStatus\n");
1604 put_unaligned(cpu_to_le32(0), (__le32
*) buf
);
1608 DBG(0, "GetPortStatus\n");
1610 if (!wIndex
|| wIndex
> ports
)
1612 tmp
= isp1362_hcd
->rhport
[--wIndex
];
1613 put_unaligned(cpu_to_le32(tmp
), (__le32
*) buf
);
1615 case ClearPortFeature
:
1616 DBG(0, "ClearPortFeature: ");
1617 if (!wIndex
|| wIndex
> ports
)
1622 case USB_PORT_FEAT_ENABLE
:
1623 _DBG(0, "USB_PORT_FEAT_ENABLE\n");
1626 case USB_PORT_FEAT_C_ENABLE
:
1627 _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1630 case USB_PORT_FEAT_SUSPEND
:
1631 _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1634 case USB_PORT_FEAT_C_SUSPEND
:
1635 _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1638 case USB_PORT_FEAT_POWER
:
1639 _DBG(0, "USB_PORT_FEAT_POWER\n");
1643 case USB_PORT_FEAT_C_CONNECTION
:
1644 _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1647 case USB_PORT_FEAT_C_OVER_CURRENT
:
1648 _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1651 case USB_PORT_FEAT_C_RESET
:
1652 _DBG(0, "USB_PORT_FEAT_C_RESET\n");
1659 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1660 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, tmp
);
1661 isp1362_hcd
->rhport
[wIndex
] =
1662 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1663 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1665 case SetPortFeature
:
1666 DBG(0, "SetPortFeature: ");
1667 if (!wIndex
|| wIndex
> ports
)
1671 case USB_PORT_FEAT_SUSPEND
:
1672 _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1673 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1674 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, RH_PS_PSS
);
1675 isp1362_hcd
->rhport
[wIndex
] =
1676 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1677 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1679 case USB_PORT_FEAT_POWER
:
1680 _DBG(0, "USB_PORT_FEAT_POWER\n");
1681 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1682 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, RH_PS_PPS
);
1683 isp1362_hcd
->rhport
[wIndex
] =
1684 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1685 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1687 case USB_PORT_FEAT_RESET
:
1688 _DBG(0, "USB_PORT_FEAT_RESET\n");
1689 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1691 t1
= jiffies
+ msecs_to_jiffies(USB_RESET_WIDTH
);
1692 while (time_before(jiffies
, t1
)) {
1693 /* spin until any current reset finishes */
1695 tmp
= isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1696 if (!(tmp
& RH_PS_PRS
))
1700 if (!(tmp
& RH_PS_CCS
))
1702 /* Reset lasts 10ms (claims datasheet) */
1703 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, (RH_PS_PRS
));
1705 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1707 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1710 isp1362_hcd
->rhport
[wIndex
] = isp1362_read_reg32(isp1362_hcd
,
1711 HCRHPORT1
+ wIndex
);
1712 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1721 /* "protocol stall" on error */
1722 _DBG(0, "PROTOCOL STALL\n");
1730 static int isp1362_bus_suspend(struct usb_hcd
*hcd
)
1733 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1734 unsigned long flags
;
1736 if (time_before(jiffies
, isp1362_hcd
->next_statechange
))
1739 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1741 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1742 switch (isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) {
1743 case OHCI_USB_RESUME
:
1744 DBG(0, "%s: resume/suspend?\n", __func__
);
1745 isp1362_hcd
->hc_control
&= ~OHCI_CTRL_HCFS
;
1746 isp1362_hcd
->hc_control
|= OHCI_USB_RESET
;
1747 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1749 case OHCI_USB_RESET
:
1751 pr_warning("%s: needs reinit!\n", __func__
);
1753 case OHCI_USB_SUSPEND
:
1754 pr_warning("%s: already suspended?\n", __func__
);
1757 DBG(0, "%s: suspend root hub\n", __func__
);
1759 /* First stop any processing */
1760 hcd
->state
= HC_STATE_QUIESCING
;
1761 if (!list_empty(&isp1362_hcd
->atl_queue
.active
) ||
1762 !list_empty(&isp1362_hcd
->intl_queue
.active
) ||
1763 !list_empty(&isp1362_hcd
->istl_queue
[0] .active
) ||
1764 !list_empty(&isp1362_hcd
->istl_queue
[1] .active
)) {
1767 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, ~0);
1768 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, ~0);
1769 isp1362_write_reg16(isp1362_hcd
, HCBUFSTAT
, 0);
1770 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
1771 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
, OHCI_INTR_SF
);
1773 DBG(0, "%s: stopping schedules ...\n", __func__
);
1778 if (isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
) & OHCI_INTR_SF
)
1782 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ATL
) {
1783 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCATLDONE
);
1784 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->atl_queue
);
1786 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_INTL
) {
1787 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
);
1788 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->intl_queue
);
1790 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ISTL0
)
1791 finish_iso_transfers(isp1362_hcd
, &isp1362_hcd
->istl_queue
[0]);
1792 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ISTL1
)
1793 finish_iso_transfers(isp1362_hcd
, &isp1362_hcd
->istl_queue
[1]);
1795 DBG(0, "%s: HCINTSTAT: %08x\n", __func__
,
1796 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
1797 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
,
1798 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
1801 isp1362_hcd
->hc_control
= OHCI_USB_SUSPEND
;
1802 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1803 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1804 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1807 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1808 if ((isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) != OHCI_USB_SUSPEND
) {
1809 pr_err("%s: controller won't suspend %08x\n", __func__
,
1810 isp1362_hcd
->hc_control
);
1815 /* no resumes until devices finish suspending */
1816 isp1362_hcd
->next_statechange
= jiffies
+ msecs_to_jiffies(5);
1820 hcd
->state
= HC_STATE_SUSPENDED
;
1821 DBG(0, "%s: HCD suspended: %08x\n", __func__
,
1822 isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
1824 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1828 static int isp1362_bus_resume(struct usb_hcd
*hcd
)
1830 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1832 unsigned long flags
;
1833 int status
= -EINPROGRESS
;
1835 if (time_before(jiffies
, isp1362_hcd
->next_statechange
))
1838 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1839 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1840 pr_info("%s: HCCONTROL: %08x\n", __func__
, isp1362_hcd
->hc_control
);
1841 if (hcd
->state
== HC_STATE_RESUMING
) {
1842 pr_warning("%s: duplicate resume\n", __func__
);
1845 switch (isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) {
1846 case OHCI_USB_SUSPEND
:
1847 DBG(0, "%s: resume root hub\n", __func__
);
1848 isp1362_hcd
->hc_control
&= ~OHCI_CTRL_HCFS
;
1849 isp1362_hcd
->hc_control
|= OHCI_USB_RESUME
;
1850 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1852 case OHCI_USB_RESUME
:
1853 /* HCFS changes sometime after INTR_RD */
1854 DBG(0, "%s: remote wakeup\n", __func__
);
1857 DBG(0, "%s: odd resume\n", __func__
);
1859 hcd
->self
.root_hub
->dev
.power
.power_state
= PMSG_ON
;
1861 default: /* RESET, we lost power */
1862 DBG(0, "%s: root hub hardware reset\n", __func__
);
1865 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1866 if (status
== -EBUSY
) {
1867 DBG(0, "%s: Restarting HC\n", __func__
);
1868 isp1362_hc_stop(hcd
);
1869 return isp1362_hc_start(hcd
);
1871 if (status
!= -EINPROGRESS
)
1873 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1874 port
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
) & RH_A_NDP
;
1876 u32 stat
= isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ port
);
1878 /* force global, not selective, resume */
1879 if (!(stat
& RH_PS_PSS
)) {
1880 DBG(0, "%s: Not Resuming RH port %d\n", __func__
, port
);
1883 DBG(0, "%s: Resuming RH port %d\n", __func__
, port
);
1884 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ port
, RH_PS_POCI
);
1886 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1888 /* Some controllers (lucent) need extra-long delays */
1889 hcd
->state
= HC_STATE_RESUMING
;
1890 mdelay(20 /* usb 11.5.1.10 */ + 15);
1892 isp1362_hcd
->hc_control
= OHCI_USB_OPER
;
1893 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1894 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1895 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1896 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1900 /* keep it alive for ~5x suspend + resume costs */
1901 isp1362_hcd
->next_statechange
= jiffies
+ msecs_to_jiffies(250);
1903 hcd
->self
.root_hub
->dev
.power
.power_state
= PMSG_ON
;
1904 hcd
->state
= HC_STATE_RUNNING
;
1908 #define isp1362_bus_suspend NULL
1909 #define isp1362_bus_resume NULL
1912 /*-------------------------------------------------------------------------*/
1914 #ifdef STUB_DEBUG_FILE
1916 static inline void create_debug_file(struct isp1362_hcd
*isp1362_hcd
)
1919 static inline void remove_debug_file(struct isp1362_hcd
*isp1362_hcd
)
1925 #include <linux/proc_fs.h>
1926 #include <linux/seq_file.h>
1928 static void dump_irq(struct seq_file
*s
, char *label
, u16 mask
)
1930 seq_printf(s
, "%-15s %04x%s%s%s%s%s%s\n", label
, mask
,
1931 mask
& HCuPINT_CLKRDY
? " clkrdy" : "",
1932 mask
& HCuPINT_SUSP
? " susp" : "",
1933 mask
& HCuPINT_OPR
? " opr" : "",
1934 mask
& HCuPINT_EOT
? " eot" : "",
1935 mask
& HCuPINT_ATL
? " atl" : "",
1936 mask
& HCuPINT_SOF
? " sof" : "");
1939 static void dump_int(struct seq_file
*s
, char *label
, u32 mask
)
1941 seq_printf(s
, "%-15s %08x%s%s%s%s%s%s%s\n", label
, mask
,
1942 mask
& OHCI_INTR_MIE
? " MIE" : "",
1943 mask
& OHCI_INTR_RHSC
? " rhsc" : "",
1944 mask
& OHCI_INTR_FNO
? " fno" : "",
1945 mask
& OHCI_INTR_UE
? " ue" : "",
1946 mask
& OHCI_INTR_RD
? " rd" : "",
1947 mask
& OHCI_INTR_SF
? " sof" : "",
1948 mask
& OHCI_INTR_SO
? " so" : "");
1951 static void dump_ctrl(struct seq_file
*s
, char *label
, u32 mask
)
1953 seq_printf(s
, "%-15s %08x%s%s%s\n", label
, mask
,
1954 mask
& OHCI_CTRL_RWC
? " rwc" : "",
1955 mask
& OHCI_CTRL_RWE
? " rwe" : "",
1958 switch (mask
& OHCI_CTRL_HCFS
) {
1962 case OHCI_USB_RESET
:
1965 case OHCI_USB_RESUME
:
1968 case OHCI_USB_SUSPEND
:
1978 static void dump_regs(struct seq_file
*s
, struct isp1362_hcd
*isp1362_hcd
)
1980 seq_printf(s
, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION
),
1981 isp1362_read_reg32(isp1362_hcd
, HCREVISION
));
1982 seq_printf(s
, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL
),
1983 isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
1984 seq_printf(s
, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT
),
1985 isp1362_read_reg32(isp1362_hcd
, HCCMDSTAT
));
1986 seq_printf(s
, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT
),
1987 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
1988 seq_printf(s
, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB
),
1989 isp1362_read_reg32(isp1362_hcd
, HCINTENB
));
1990 seq_printf(s
, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL
),
1991 isp1362_read_reg32(isp1362_hcd
, HCFMINTVL
));
1992 seq_printf(s
, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM
),
1993 isp1362_read_reg32(isp1362_hcd
, HCFMREM
));
1994 seq_printf(s
, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM
),
1995 isp1362_read_reg32(isp1362_hcd
, HCFMNUM
));
1996 seq_printf(s
, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH
),
1997 isp1362_read_reg32(isp1362_hcd
, HCLSTHRESH
));
1998 seq_printf(s
, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA
),
1999 isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
));
2000 seq_printf(s
, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB
),
2001 isp1362_read_reg32(isp1362_hcd
, HCRHDESCB
));
2002 seq_printf(s
, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS
),
2003 isp1362_read_reg32(isp1362_hcd
, HCRHSTATUS
));
2004 seq_printf(s
, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1
),
2005 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
));
2006 seq_printf(s
, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2
),
2007 isp1362_read_reg32(isp1362_hcd
, HCRHPORT2
));
2008 seq_printf(s
, "\n");
2009 seq_printf(s
, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG
),
2010 isp1362_read_reg16(isp1362_hcd
, HCHWCFG
));
2011 seq_printf(s
, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG
),
2012 isp1362_read_reg16(isp1362_hcd
, HCDMACFG
));
2013 seq_printf(s
, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR
),
2014 isp1362_read_reg16(isp1362_hcd
, HCXFERCTR
));
2015 seq_printf(s
, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT
),
2016 isp1362_read_reg16(isp1362_hcd
, HCuPINT
));
2017 seq_printf(s
, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB
),
2018 isp1362_read_reg16(isp1362_hcd
, HCuPINTENB
));
2019 seq_printf(s
, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID
),
2020 isp1362_read_reg16(isp1362_hcd
, HCCHIPID
));
2021 seq_printf(s
, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH
),
2022 isp1362_read_reg16(isp1362_hcd
, HCSCRATCH
));
2023 seq_printf(s
, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT
),
2024 isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
));
2025 seq_printf(s
, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR
),
2026 isp1362_read_reg32(isp1362_hcd
, HCDIRADDR
));
2028 seq_printf(s
, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA
),
2029 isp1362_read_reg16(isp1362_hcd
, HCDIRDATA
));
2031 seq_printf(s
, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ
),
2032 isp1362_read_reg16(isp1362_hcd
, HCISTLBUFSZ
));
2033 seq_printf(s
, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE
),
2034 isp1362_read_reg16(isp1362_hcd
, HCISTLRATE
));
2035 seq_printf(s
, "\n");
2036 seq_printf(s
, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ
),
2037 isp1362_read_reg16(isp1362_hcd
, HCINTLBUFSZ
));
2038 seq_printf(s
, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ
),
2039 isp1362_read_reg16(isp1362_hcd
, HCINTLBLKSZ
));
2040 seq_printf(s
, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE
),
2041 isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
));
2042 seq_printf(s
, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP
),
2043 isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
));
2044 seq_printf(s
, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST
),
2045 isp1362_read_reg32(isp1362_hcd
, HCINTLLAST
));
2046 seq_printf(s
, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR
),
2047 isp1362_read_reg16(isp1362_hcd
, HCINTLCURR
));
2048 seq_printf(s
, "\n");
2049 seq_printf(s
, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ
),
2050 isp1362_read_reg16(isp1362_hcd
, HCATLBUFSZ
));
2051 seq_printf(s
, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ
),
2052 isp1362_read_reg16(isp1362_hcd
, HCATLBLKSZ
));
2054 seq_printf(s
, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE
),
2055 isp1362_read_reg32(isp1362_hcd
, HCATLDONE
));
2057 seq_printf(s
, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP
),
2058 isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
));
2059 seq_printf(s
, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST
),
2060 isp1362_read_reg32(isp1362_hcd
, HCATLLAST
));
2061 seq_printf(s
, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR
),
2062 isp1362_read_reg16(isp1362_hcd
, HCATLCURR
));
2063 seq_printf(s
, "\n");
2064 seq_printf(s
, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC
),
2065 isp1362_read_reg16(isp1362_hcd
, HCATLDTC
));
2066 seq_printf(s
, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO
),
2067 isp1362_read_reg16(isp1362_hcd
, HCATLDTCTO
));
2070 static int proc_isp1362_show(struct seq_file
*s
, void *unused
)
2072 struct isp1362_hcd
*isp1362_hcd
= s
->private;
2073 struct isp1362_ep
*ep
;
2076 seq_printf(s
, "%s\n%s version %s\n",
2077 isp1362_hcd_to_hcd(isp1362_hcd
)->product_desc
, hcd_name
, DRIVER_VERSION
);
2079 /* collect statistics to help estimate potential win for
2080 * DMA engines that care about alignment (PXA)
2082 seq_printf(s
, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2083 isp1362_hcd
->stat16
, isp1362_hcd
->stat8
, isp1362_hcd
->stat4
,
2084 isp1362_hcd
->stat2
, isp1362_hcd
->stat1
);
2085 seq_printf(s
, "max # ptds in ATL fifo: %d\n", isp1362_hcd
->atl_queue
.stat_maxptds
);
2086 seq_printf(s
, "max # ptds in INTL fifo: %d\n", isp1362_hcd
->intl_queue
.stat_maxptds
);
2087 seq_printf(s
, "max # ptds in ISTL fifo: %d\n",
2088 max(isp1362_hcd
->istl_queue
[0] .stat_maxptds
,
2089 isp1362_hcd
->istl_queue
[1] .stat_maxptds
));
2091 /* FIXME: don't show the following in suspended state */
2092 spin_lock_irq(&isp1362_hcd
->lock
);
2094 dump_irq(s
, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd
, HCuPINTENB
));
2095 dump_irq(s
, "hc_irq_status", isp1362_read_reg16(isp1362_hcd
, HCuPINT
));
2096 dump_int(s
, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd
, HCINTENB
));
2097 dump_int(s
, "ohci_int_status", isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
2098 dump_ctrl(s
, "ohci_control", isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
2100 for (i
= 0; i
< NUM_ISP1362_IRQS
; i
++)
2101 if (isp1362_hcd
->irq_stat
[i
])
2102 seq_printf(s
, "%-15s: %d\n",
2103 ISP1362_INT_NAME(i
), isp1362_hcd
->irq_stat
[i
]);
2105 dump_regs(s
, isp1362_hcd
);
2106 list_for_each_entry(ep
, &isp1362_hcd
->async
, schedule
) {
2109 seq_printf(s
, "%p, ep%d%s, maxpacket %d:\n", ep
, ep
->epnum
,
2112 switch (ep
->nextpid
) {
2129 s
;}), ep
->maxpacket
) ;
2130 list_for_each_entry(urb
, &ep
->hep
->urb_list
, urb_list
) {
2131 seq_printf(s
, " urb%p, %d/%d\n", urb
,
2133 urb
->transfer_buffer_length
);
2136 if (!list_empty(&isp1362_hcd
->async
))
2137 seq_printf(s
, "\n");
2138 dump_ptd_queue(&isp1362_hcd
->atl_queue
);
2140 seq_printf(s
, "periodic size= %d\n", PERIODIC_SIZE
);
2142 list_for_each_entry(ep
, &isp1362_hcd
->periodic
, schedule
) {
2143 seq_printf(s
, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep
->branch
,
2144 isp1362_hcd
->load
[ep
->branch
], ep
->ptd_index
, ep
->ptd_offset
);
2146 seq_printf(s
, " %d/%p (%sdev%d ep%d%s max %d)\n",
2148 (ep
->udev
->speed
== USB_SPEED_FULL
) ? "" : "ls ",
2149 ep
->udev
->devnum
, ep
->epnum
,
2150 (ep
->epnum
== 0) ? "" :
2151 ((ep
->nextpid
== USB_PID_IN
) ?
2152 "in" : "out"), ep
->maxpacket
);
2154 dump_ptd_queue(&isp1362_hcd
->intl_queue
);
2156 seq_printf(s
, "ISO:\n");
2158 list_for_each_entry(ep
, &isp1362_hcd
->isoc
, schedule
) {
2159 seq_printf(s
, " %d/%p (%sdev%d ep%d%s max %d)\n",
2161 (ep
->udev
->speed
== USB_SPEED_FULL
) ? "" : "ls ",
2162 ep
->udev
->devnum
, ep
->epnum
,
2163 (ep
->epnum
== 0) ? "" :
2164 ((ep
->nextpid
== USB_PID_IN
) ?
2165 "in" : "out"), ep
->maxpacket
);
2168 spin_unlock_irq(&isp1362_hcd
->lock
);
2169 seq_printf(s
, "\n");
2174 static int proc_isp1362_open(struct inode
*inode
, struct file
*file
)
2176 return single_open(file
, proc_isp1362_show
, PDE_DATA(inode
));
2179 static const struct file_operations proc_ops
= {
2180 .open
= proc_isp1362_open
,
2182 .llseek
= seq_lseek
,
2183 .release
= single_release
,
2186 /* expect just one isp1362_hcd per system */
2187 static const char proc_filename
[] = "driver/isp1362";
2189 static void create_debug_file(struct isp1362_hcd
*isp1362_hcd
)
2191 struct proc_dir_entry
*pde
;
2193 pde
= proc_create_data(proc_filename
, 0, NULL
, &proc_ops
, isp1362_hcd
);
2195 pr_warning("%s: Failed to create debug file '%s'\n", __func__
, proc_filename
);
2198 isp1362_hcd
->pde
= pde
;
2201 static void remove_debug_file(struct isp1362_hcd
*isp1362_hcd
)
2203 if (isp1362_hcd
->pde
)
2204 remove_proc_entry(proc_filename
, NULL
);
2209 /*-------------------------------------------------------------------------*/
2211 static void __isp1362_sw_reset(struct isp1362_hcd
*isp1362_hcd
)
2215 isp1362_write_reg16(isp1362_hcd
, HCSWRES
, HCSWRES_MAGIC
);
2216 isp1362_write_reg32(isp1362_hcd
, HCCMDSTAT
, OHCI_HCR
);
2219 if (!(isp1362_read_reg32(isp1362_hcd
, HCCMDSTAT
) & OHCI_HCR
))
2223 pr_err("Software reset timeout\n");
2226 static void isp1362_sw_reset(struct isp1362_hcd
*isp1362_hcd
)
2228 unsigned long flags
;
2230 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2231 __isp1362_sw_reset(isp1362_hcd
);
2232 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2235 static int isp1362_mem_config(struct usb_hcd
*hcd
)
2237 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2238 unsigned long flags
;
2240 u16 istl_size
= ISP1362_ISTL_BUFSIZE
;
2241 u16 intl_blksize
= ISP1362_INTL_BLKSIZE
+ PTD_HEADER_SIZE
;
2242 u16 intl_size
= ISP1362_INTL_BUFFERS
* intl_blksize
;
2243 u16 atl_blksize
= ISP1362_ATL_BLKSIZE
+ PTD_HEADER_SIZE
;
2244 u16 atl_buffers
= (ISP1362_BUF_SIZE
- (istl_size
+ intl_size
)) / atl_blksize
;
2248 WARN_ON(istl_size
& 3);
2249 WARN_ON(atl_blksize
& 3);
2250 WARN_ON(intl_blksize
& 3);
2251 WARN_ON(atl_blksize
< PTD_HEADER_SIZE
);
2252 WARN_ON(intl_blksize
< PTD_HEADER_SIZE
);
2254 BUG_ON((unsigned)ISP1362_INTL_BUFFERS
> 32);
2255 if (atl_buffers
> 32)
2257 atl_size
= atl_buffers
* atl_blksize
;
2258 total
= atl_size
+ intl_size
+ istl_size
;
2259 dev_info(hcd
->self
.controller
, "ISP1362 Memory usage:\n");
2260 dev_info(hcd
->self
.controller
, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2261 istl_size
/ 2, istl_size
, 0, istl_size
/ 2);
2262 dev_info(hcd
->self
.controller
, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2263 ISP1362_INTL_BUFFERS
, intl_blksize
- PTD_HEADER_SIZE
,
2264 intl_size
, istl_size
);
2265 dev_info(hcd
->self
.controller
, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2266 atl_buffers
, atl_blksize
- PTD_HEADER_SIZE
,
2267 atl_size
, istl_size
+ intl_size
);
2268 dev_info(hcd
->self
.controller
, " USED/FREE: %4d %4d\n", total
,
2269 ISP1362_BUF_SIZE
- total
);
2271 if (total
> ISP1362_BUF_SIZE
) {
2272 dev_err(hcd
->self
.controller
, "%s: Memory requested: %d, available %d\n",
2273 __func__
, total
, ISP1362_BUF_SIZE
);
2277 total
= istl_size
+ intl_size
+ atl_size
;
2278 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2280 for (i
= 0; i
< 2; i
++) {
2281 isp1362_hcd
->istl_queue
[i
].buf_start
= i
* istl_size
/ 2,
2282 isp1362_hcd
->istl_queue
[i
].buf_size
= istl_size
/ 2;
2283 isp1362_hcd
->istl_queue
[i
].blk_size
= 4;
2284 INIT_LIST_HEAD(&isp1362_hcd
->istl_queue
[i
].active
);
2285 snprintf(isp1362_hcd
->istl_queue
[i
].name
,
2286 sizeof(isp1362_hcd
->istl_queue
[i
].name
), "ISTL%d", i
);
2287 DBG(3, "%s: %5s buf $%04x %d\n", __func__
,
2288 isp1362_hcd
->istl_queue
[i
].name
,
2289 isp1362_hcd
->istl_queue
[i
].buf_start
,
2290 isp1362_hcd
->istl_queue
[i
].buf_size
);
2292 isp1362_write_reg16(isp1362_hcd
, HCISTLBUFSZ
, istl_size
/ 2);
2294 isp1362_hcd
->intl_queue
.buf_start
= istl_size
;
2295 isp1362_hcd
->intl_queue
.buf_size
= intl_size
;
2296 isp1362_hcd
->intl_queue
.buf_count
= ISP1362_INTL_BUFFERS
;
2297 isp1362_hcd
->intl_queue
.blk_size
= intl_blksize
;
2298 isp1362_hcd
->intl_queue
.buf_avail
= isp1362_hcd
->intl_queue
.buf_count
;
2299 isp1362_hcd
->intl_queue
.skip_map
= ~0;
2300 INIT_LIST_HEAD(&isp1362_hcd
->intl_queue
.active
);
2302 isp1362_write_reg16(isp1362_hcd
, HCINTLBUFSZ
,
2303 isp1362_hcd
->intl_queue
.buf_size
);
2304 isp1362_write_reg16(isp1362_hcd
, HCINTLBLKSZ
,
2305 isp1362_hcd
->intl_queue
.blk_size
- PTD_HEADER_SIZE
);
2306 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, ~0);
2307 isp1362_write_reg32(isp1362_hcd
, HCINTLLAST
,
2308 1 << (ISP1362_INTL_BUFFERS
- 1));
2310 isp1362_hcd
->atl_queue
.buf_start
= istl_size
+ intl_size
;
2311 isp1362_hcd
->atl_queue
.buf_size
= atl_size
;
2312 isp1362_hcd
->atl_queue
.buf_count
= atl_buffers
;
2313 isp1362_hcd
->atl_queue
.blk_size
= atl_blksize
;
2314 isp1362_hcd
->atl_queue
.buf_avail
= isp1362_hcd
->atl_queue
.buf_count
;
2315 isp1362_hcd
->atl_queue
.skip_map
= ~0;
2316 INIT_LIST_HEAD(&isp1362_hcd
->atl_queue
.active
);
2318 isp1362_write_reg16(isp1362_hcd
, HCATLBUFSZ
,
2319 isp1362_hcd
->atl_queue
.buf_size
);
2320 isp1362_write_reg16(isp1362_hcd
, HCATLBLKSZ
,
2321 isp1362_hcd
->atl_queue
.blk_size
- PTD_HEADER_SIZE
);
2322 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, ~0);
2323 isp1362_write_reg32(isp1362_hcd
, HCATLLAST
,
2324 1 << (atl_buffers
- 1));
2326 snprintf(isp1362_hcd
->atl_queue
.name
,
2327 sizeof(isp1362_hcd
->atl_queue
.name
), "ATL");
2328 snprintf(isp1362_hcd
->intl_queue
.name
,
2329 sizeof(isp1362_hcd
->intl_queue
.name
), "INTL");
2330 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__
,
2331 isp1362_hcd
->intl_queue
.name
,
2332 isp1362_hcd
->intl_queue
.buf_start
,
2333 ISP1362_INTL_BUFFERS
, isp1362_hcd
->intl_queue
.blk_size
,
2334 isp1362_hcd
->intl_queue
.buf_size
);
2335 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__
,
2336 isp1362_hcd
->atl_queue
.name
,
2337 isp1362_hcd
->atl_queue
.buf_start
,
2338 atl_buffers
, isp1362_hcd
->atl_queue
.blk_size
,
2339 isp1362_hcd
->atl_queue
.buf_size
);
2341 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2346 static int isp1362_hc_reset(struct usb_hcd
*hcd
)
2349 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2351 unsigned long timeout
= 100;
2352 unsigned long flags
;
2355 pr_debug("%s:\n", __func__
);
2357 if (isp1362_hcd
->board
&& isp1362_hcd
->board
->reset
) {
2358 isp1362_hcd
->board
->reset(hcd
->self
.controller
, 1);
2360 if (isp1362_hcd
->board
->clock
)
2361 isp1362_hcd
->board
->clock(hcd
->self
.controller
, 1);
2362 isp1362_hcd
->board
->reset(hcd
->self
.controller
, 0);
2364 isp1362_sw_reset(isp1362_hcd
);
2366 /* chip has been reset. First we need to see a clock */
2367 t
= jiffies
+ msecs_to_jiffies(timeout
);
2368 while (!clkrdy
&& time_before_eq(jiffies
, t
)) {
2369 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2370 clkrdy
= isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_CLKRDY
;
2371 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2376 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2377 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, HCuPINT_CLKRDY
);
2378 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2380 pr_err("Clock not ready after %lums\n", timeout
);
2386 static void isp1362_hc_stop(struct usb_hcd
*hcd
)
2388 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2389 unsigned long flags
;
2392 pr_debug("%s:\n", __func__
);
2394 del_timer_sync(&hcd
->rh_timer
);
2396 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2398 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
2400 /* Switch off power for all ports */
2401 tmp
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
);
2402 tmp
&= ~(RH_A_NPS
| RH_A_PSM
);
2403 isp1362_write_reg32(isp1362_hcd
, HCRHDESCA
, tmp
);
2404 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_LPS
);
2406 /* Reset the chip */
2407 if (isp1362_hcd
->board
&& isp1362_hcd
->board
->reset
)
2408 isp1362_hcd
->board
->reset(hcd
->self
.controller
, 1);
2410 __isp1362_sw_reset(isp1362_hcd
);
2412 if (isp1362_hcd
->board
&& isp1362_hcd
->board
->clock
)
2413 isp1362_hcd
->board
->clock(hcd
->self
.controller
, 0);
2415 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
#ifdef CHIP_BUFFER_TEST
/*
 * Optional power-on self test of the chip's buffer memory: write/read-back
 * patterns at byte offsets 0..3 and various lengths, a full-buffer pass,
 * and PTD-header writes at 256 word offsets. Returns 0 if all checks pass,
 * -ENODEV on any mismatch.
 * NOTE(review): heavily reconstructed from a mangled source dump -- the
 * control flow (error branches, break/return paths) and the test_size
 * initialization were elided and restored per the mainline driver; verify
 * against upstream before relying on exact behavior.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* small transfers at all four byte offsets */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* full buffer write/read-back */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD header writes at 256 word offsets */
		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the read once before declaring failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
					   __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
#endif
2511 static int isp1362_hc_start(struct usb_hcd
*hcd
)
2514 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2515 struct isp1362_platform_data
*board
= isp1362_hcd
->board
;
2518 unsigned long flags
;
2520 pr_debug("%s:\n", __func__
);
2522 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2523 chipid
= isp1362_read_reg16(isp1362_hcd
, HCCHIPID
);
2524 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2526 if ((chipid
& HCCHIPID_MASK
) != HCCHIPID_MAGIC
) {
2527 pr_err("%s: Invalid chip ID %04x\n", __func__
, chipid
);
2531 #ifdef CHIP_BUFFER_TEST
2532 ret
= isp1362_chip_test(isp1362_hcd
);
2536 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2537 /* clear interrupt status and disable all interrupt sources */
2538 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, 0xff);
2539 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
2542 hwcfg
= HCHWCFG_INT_ENABLE
| HCHWCFG_DBWIDTH(1);
2543 if (board
->sel15Kres
)
2544 hwcfg
|= HCHWCFG_PULLDOWN_DS2
|
2545 ((MAX_ROOT_PORTS
> 1) ? HCHWCFG_PULLDOWN_DS1
: 0);
2546 if (board
->clknotstop
)
2547 hwcfg
|= HCHWCFG_CLKNOTSTOP
;
2548 if (board
->oc_enable
)
2549 hwcfg
|= HCHWCFG_ANALOG_OC
;
2550 if (board
->int_act_high
)
2551 hwcfg
|= HCHWCFG_INT_POL
;
2552 if (board
->int_edge_triggered
)
2553 hwcfg
|= HCHWCFG_INT_TRIGGER
;
2554 if (board
->dreq_act_high
)
2555 hwcfg
|= HCHWCFG_DREQ_POL
;
2556 if (board
->dack_act_high
)
2557 hwcfg
|= HCHWCFG_DACK_POL
;
2558 isp1362_write_reg16(isp1362_hcd
, HCHWCFG
, hwcfg
);
2559 isp1362_show_reg(isp1362_hcd
, HCHWCFG
);
2560 isp1362_write_reg16(isp1362_hcd
, HCDMACFG
, 0);
2561 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2563 ret
= isp1362_mem_config(hcd
);
2567 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2570 isp1362_hcd
->rhdesca
= 0;
2571 if (board
->no_power_switching
)
2572 isp1362_hcd
->rhdesca
|= RH_A_NPS
;
2573 if (board
->power_switching_mode
)
2574 isp1362_hcd
->rhdesca
|= RH_A_PSM
;
2576 isp1362_hcd
->rhdesca
|= (board
->potpg
<< 24) & RH_A_POTPGT
;
2578 isp1362_hcd
->rhdesca
|= (25 << 24) & RH_A_POTPGT
;
2580 isp1362_write_reg32(isp1362_hcd
, HCRHDESCA
, isp1362_hcd
->rhdesca
& ~RH_A_OCPM
);
2581 isp1362_write_reg32(isp1362_hcd
, HCRHDESCA
, isp1362_hcd
->rhdesca
| RH_A_OCPM
);
2582 isp1362_hcd
->rhdesca
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
);
2584 isp1362_hcd
->rhdescb
= RH_B_PPCM
;
2585 isp1362_write_reg32(isp1362_hcd
, HCRHDESCB
, isp1362_hcd
->rhdescb
);
2586 isp1362_hcd
->rhdescb
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCB
);
2588 isp1362_read_reg32(isp1362_hcd
, HCFMINTVL
);
2589 isp1362_write_reg32(isp1362_hcd
, HCFMINTVL
, (FSMP(FI
) << 16) | FI
);
2590 isp1362_write_reg32(isp1362_hcd
, HCLSTHRESH
, LSTHRESH
);
2592 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2594 isp1362_hcd
->hc_control
= OHCI_USB_OPER
;
2595 hcd
->state
= HC_STATE_RUNNING
;
2597 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2598 /* Set up interrupts */
2599 isp1362_hcd
->intenb
= OHCI_INTR_MIE
| OHCI_INTR_RHSC
| OHCI_INTR_UE
;
2600 isp1362_hcd
->intenb
|= OHCI_INTR_RD
;
2601 isp1362_hcd
->irqenb
= HCuPINT_OPR
| HCuPINT_SUSP
;
2602 isp1362_write_reg32(isp1362_hcd
, HCINTENB
, isp1362_hcd
->intenb
);
2603 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
2605 /* Go operational */
2606 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
2607 /* enable global power */
2608 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_LPSC
| RH_HS_DRWE
);
2610 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2615 /*-------------------------------------------------------------------------*/
2617 static struct hc_driver isp1362_hc_driver
= {
2618 .description
= hcd_name
,
2619 .product_desc
= "ISP1362 Host Controller",
2620 .hcd_priv_size
= sizeof(struct isp1362_hcd
),
2623 .flags
= HCD_USB11
| HCD_MEMORY
,
2625 .reset
= isp1362_hc_reset
,
2626 .start
= isp1362_hc_start
,
2627 .stop
= isp1362_hc_stop
,
2629 .urb_enqueue
= isp1362_urb_enqueue
,
2630 .urb_dequeue
= isp1362_urb_dequeue
,
2631 .endpoint_disable
= isp1362_endpoint_disable
,
2633 .get_frame_number
= isp1362_get_frame
,
2635 .hub_status_data
= isp1362_hub_status_data
,
2636 .hub_control
= isp1362_hub_control
,
2637 .bus_suspend
= isp1362_bus_suspend
,
2638 .bus_resume
= isp1362_bus_resume
,
2641 /*-------------------------------------------------------------------------*/
2643 static int isp1362_remove(struct platform_device
*pdev
)
2645 struct usb_hcd
*hcd
= platform_get_drvdata(pdev
);
2646 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2647 struct resource
*res
;
2649 remove_debug_file(isp1362_hcd
);
2650 DBG(0, "%s: Removing HCD\n", __func__
);
2651 usb_remove_hcd(hcd
);
2653 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__
,
2654 isp1362_hcd
->data_reg
);
2655 iounmap(isp1362_hcd
->data_reg
);
2657 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__
,
2658 isp1362_hcd
->addr_reg
);
2659 iounmap(isp1362_hcd
->addr_reg
);
2661 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
2662 DBG(0, "%s: release mem_region: %08lx\n", __func__
, (long unsigned int)res
->start
);
2664 release_mem_region(res
->start
, resource_size(res
));
2666 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2667 DBG(0, "%s: release mem_region: %08lx\n", __func__
, (long unsigned int)res
->start
);
2669 release_mem_region(res
->start
, resource_size(res
));
2671 DBG(0, "%s: put_hcd\n", __func__
);
2673 DBG(0, "%s: Done\n", __func__
);
2678 static int isp1362_probe(struct platform_device
*pdev
)
2680 struct usb_hcd
*hcd
;
2681 struct isp1362_hcd
*isp1362_hcd
;
2682 struct resource
*addr
, *data
;
2683 void __iomem
*addr_reg
;
2684 void __iomem
*data_reg
;
2687 struct resource
*irq_res
;
2688 unsigned int irq_flags
= 0;
2693 /* basic sanity checks first. board-specific init logic should
2694 * have initialized this the three resources and probably board
2695 * specific platform_data. we don't probe for IRQs, and do only
2696 * minimal sanity checking.
2698 if (pdev
->num_resources
< 3) {
2703 data
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2704 addr
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
2705 irq_res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
2706 if (!addr
|| !data
|| !irq_res
) {
2710 irq
= irq_res
->start
;
2712 if (pdev
->dev
.dma_mask
) {
2713 DBG(1, "won't do DMA");
2718 if (!request_mem_region(addr
->start
, resource_size(addr
), hcd_name
)) {
2722 addr_reg
= ioremap(addr
->start
, resource_size(addr
));
2723 if (addr_reg
== NULL
) {
2728 if (!request_mem_region(data
->start
, resource_size(data
), hcd_name
)) {
2732 data_reg
= ioremap(data
->start
, resource_size(data
));
2733 if (data_reg
== NULL
) {
2738 /* allocate and initialize hcd */
2739 hcd
= usb_create_hcd(&isp1362_hc_driver
, &pdev
->dev
, dev_name(&pdev
->dev
));
2744 hcd
->rsrc_start
= data
->start
;
2745 isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2746 isp1362_hcd
->data_reg
= data_reg
;
2747 isp1362_hcd
->addr_reg
= addr_reg
;
2749 isp1362_hcd
->next_statechange
= jiffies
;
2750 spin_lock_init(&isp1362_hcd
->lock
);
2751 INIT_LIST_HEAD(&isp1362_hcd
->async
);
2752 INIT_LIST_HEAD(&isp1362_hcd
->periodic
);
2753 INIT_LIST_HEAD(&isp1362_hcd
->isoc
);
2754 INIT_LIST_HEAD(&isp1362_hcd
->remove_list
);
2755 isp1362_hcd
->board
= pdev
->dev
.platform_data
;
2756 #if USE_PLATFORM_DELAY
2757 if (!isp1362_hcd
->board
->delay
) {
2758 dev_err(hcd
->self
.controller
, "No platform delay function given\n");
2764 if (irq_res
->flags
& IORESOURCE_IRQ_HIGHEDGE
)
2765 irq_flags
|= IRQF_TRIGGER_RISING
;
2766 if (irq_res
->flags
& IORESOURCE_IRQ_LOWEDGE
)
2767 irq_flags
|= IRQF_TRIGGER_FALLING
;
2768 if (irq_res
->flags
& IORESOURCE_IRQ_HIGHLEVEL
)
2769 irq_flags
|= IRQF_TRIGGER_HIGH
;
2770 if (irq_res
->flags
& IORESOURCE_IRQ_LOWLEVEL
)
2771 irq_flags
|= IRQF_TRIGGER_LOW
;
2773 retval
= usb_add_hcd(hcd
, irq
, irq_flags
| IRQF_SHARED
);
2776 pr_info("%s, irq %d\n", hcd
->product_desc
, irq
);
2778 create_debug_file(isp1362_hcd
);
2783 DBG(0, "%s: Freeing dev %p\n", __func__
, isp1362_hcd
);
2786 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__
, data_reg
);
2789 DBG(0, "%s: Releasing mem region %08lx\n", __func__
, (long unsigned int)data
->start
);
2790 release_mem_region(data
->start
, resource_size(data
));
2792 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__
, addr_reg
);
2795 DBG(0, "%s: Releasing mem region %08lx\n", __func__
, (long unsigned int)addr
->start
);
2796 release_mem_region(addr
->start
, resource_size(addr
));
2798 pr_err("%s: init error, %d\n", __func__
, retval
);
#ifdef CONFIG_PM
/*
 * Platform suspend: for PM_EVENT_FREEZE suspend the root hub via the bus
 * layer, otherwise just power down the root-hub ports; on success record
 * the new power state in the device.
 * NOTE(review): the #ifdef CONFIG_PM / #else / #endif scaffolding and the
 * retval handling were elided in the mangled dump and restored per the
 * mainline driver -- verify against upstream.
 */
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event == PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	} else {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	}
	if (retval == 0)
		pdev->dev.power.power_state = state;
	return retval;
}

/*
 * Platform resume: re-power the root-hub ports if we only powered them
 * down at suspend time, otherwise resume the root hub via the bus layer.
 */
static int isp1362_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(0, "%s: Resuming\n", __func__);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		DBG(0, "%s: Resume RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return 0;
	}

	pdev->dev.power.power_state = PMSG_ON;

	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
}
#else
#define	isp1362_suspend	NULL
#define	isp1362_resume	NULL
#endif
2852 static struct platform_driver isp1362_driver
= {
2853 .probe
= isp1362_probe
,
2854 .remove
= isp1362_remove
,
2856 .suspend
= isp1362_suspend
,
2857 .resume
= isp1362_resume
,
2859 .name
= (char *)hcd_name
,
2860 .owner
= THIS_MODULE
,
2864 module_platform_driver(isp1362_driver
);