/*
 * linux/drivers/usb/gadget/pxa2xx_udc.c
 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
 *
 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
 * Copyright (C) 2003 Robert Schwebel, Pengutronix
 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003 Joshua Wise
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#undef DEBUG
// #define VERBOSE DBG_VERBOSE

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <asm/unaligned.h>
#include <asm/hardware.h>
#ifdef CONFIG_ARCH_PXA
#include <asm/arch/pxa-regs.h>
#endif

#include <linux/usb_ch9.h>
#include <linux/usb_gadget.h>

#include <asm/arch/udc.h>


/*
 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
 * series processors.  The UDC for the IXP 4xx series is very similar.
 * There are fifteen endpoints, in addition to ep0.
 *
 * Such controller drivers work with a gadget driver.  The gadget driver
 * returns descriptors, implements configuration and data protocols used
 * by the host to interact with this device, and allocates endpoints to
 * the different protocol interfaces.  The controller driver virtualizes
 * usb hardware so that the gadget drivers will be more portable.
 *
 * This UDC hardware wants to implement a bit too much USB protocol, so
 * it constrains the sorts of USB configuration change events that work.
 * The errata for these chips are misleading; some "fixed" bugs from
 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
 */

#define DRIVER_VERSION	"4-May-2005"
#define DRIVER_DESC	"PXA 25x USB Device Controller driver"


static const char driver_name [] = "pxa2xx_udc";

static const char ep0name [] = "ep0";


// #define USE_DMA
// #define USE_OUT_DMA
// #define DISABLE_TEST_MODE

#ifdef CONFIG_ARCH_IXP4XX
#undef USE_DMA

/* cpu-specific register addresses are compiled in to this code */
#ifdef CONFIG_ARCH_PXA
#error "Can't configure both IXP and PXA"
#endif

#endif

#include "pxa2xx_udc.h"


#ifdef USE_DMA
static int use_dma = 1;
module_param(use_dma, bool, 0);
MODULE_PARM_DESC (use_dma, "true to use dma");

static void dma_nodesc_handler (int dmach, void *_ep);
static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);

#ifdef USE_OUT_DMA
#define DMASTR " (dma support)"
#else
#define DMASTR " (dma in)"
#endif

#else	/* !USE_DMA */
#define DMASTR " (pio only)"
#undef USE_OUT_DMA
#endif

#ifdef CONFIG_USB_PXA2XX_SMALL
#define SIZE_STR " (small)"
#else
#define SIZE_STR ""
#endif

#ifdef DISABLE_TEST_MODE
/* (mode == 0) == no undocumented chip tweaks
 * (mode & 1)  == double buffer bulk IN
 * (mode & 2)  == double buffer bulk OUT
 * ... so mode = 3 (or 7, 15, etc) does it for both
 */
static ushort fifo_mode = 0;
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
#endif

/* ---------------------------------------------------------------------------
 *	endpoint related parts of the api to the usb controller hardware,
 *	used by gadget driver; and the inner talker-to-hardware core.
 * ---------------------------------------------------------------------------
 */

static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
static void nuke (struct pxa2xx_ep *, int status);

/* one GPIO should be used to detect VBUS from the host */
static int is_vbus_present(void)
{
	struct pxa2xx_udc_mach_info *mach = the_controller->mach;

	if (mach->gpio_vbus)
		return pxa_gpio_get(mach->gpio_vbus);
	if (mach->udc_is_connected)
		return mach->udc_is_connected();
	return 1;
}

/* one GPIO should control a D+ pullup, so host sees this device (or not) */
static void pullup_off(void)
{
	struct pxa2xx_udc_mach_info *mach = the_controller->mach;

	if (mach->gpio_pullup)
		pxa_gpio_set(mach->gpio_pullup, 0);
	else if (mach->udc_command)
		mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
}

static void pullup_on(void)
{
	struct pxa2xx_udc_mach_info *mach = the_controller->mach;

	if (mach->gpio_pullup)
		pxa_gpio_set(mach->gpio_pullup, 1);
	else if (mach->udc_command)
		mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
}

static void pio_irq_enable(int bEndpointAddress)
{
	bEndpointAddress &= 0xf;
	if (bEndpointAddress < 8)
		UICR0 &= ~(1 << bEndpointAddress);
	else {
		bEndpointAddress -= 8;
		UICR1 &= ~(1 << bEndpointAddress);
	}
}

static void pio_irq_disable(int bEndpointAddress)
{
	bEndpointAddress &= 0xf;
	if (bEndpointAddress < 8)
		UICR0 |= 1 << bEndpointAddress;
	else {
		bEndpointAddress -= 8;
		UICR1 |= 1 << bEndpointAddress;
	}
}

/* The UDCCR reg contains mask and interrupt status bits,
 * so using '|=' isn't safe as it may ack an interrupt.
 */
#define UDCCR_MASK_BITS	(UDCCR_REM | UDCCR_SRM | UDCCR_UDE)

static inline void udc_set_mask_UDCCR(int mask)
{
	UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
}

static inline void udc_clear_mask_UDCCR(int mask)
{
	UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
}

static inline void udc_ack_int_UDCCR(int mask)
{
	/* udccr contains the bits we don't want to change */
	__u32 udccr = UDCCR & UDCCR_MASK_BITS;

	UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
}

/*
 * endpoint enable/disable
 *
 * we need to verify the descriptors used to enable endpoints.  since pxa2xx
 * endpoint configurations are fixed, and are pretty much always enabled,
 * there's not a lot to manage here.
 *
 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
 * for a single interface (with only the default altsetting) and for gadget
 * drivers that don't halt endpoints (not reset by set_interface).  that also
 * means that if you use ISO, you must violate the USB spec rule that all
 * iso endpoints must be in non-default altsettings.
 */
static int pxa2xx_ep_enable (struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct pxa2xx_ep *ep;
	struct pxa2xx_udc *dev;

	ep = container_of (_ep, struct pxa2xx_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| ep->bEndpointAddress != desc->bEndpointAddress
			|| ep->fifo_size < le16_to_cpu
						(desc->wMaxPacketSize)) {
		DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
		return -EINVAL;
	}

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
		return -EINVAL;
	}

	/* hardware _could_ do smaller, but driver doesn't */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
				&& le16_to_cpu (desc->wMaxPacketSize)
						!= BULK_FIFO_SIZE)
			|| !desc->wMaxPacketSize) {
		DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
		return -ERANGE;
	}

	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		DMSG("%s, bogus device state\n", __FUNCTION__);
		return -ESHUTDOWN;
	}

	ep->desc = desc;
	ep->dma = -1;
	ep->stopped = 0;
	ep->pio_irqs = ep->dma_irqs = 0;
	ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);

	/* flush fifo (mostly for OUT buffers) */
	pxa2xx_ep_fifo_flush (_ep);

	/* ... reset halt state too, if we could ... */

#ifdef USE_DMA
	/* for (some) bulk and ISO endpoints, try to get a DMA channel and
	 * bind it to the endpoint.  otherwise use PIO.
	 */
	switch (ep->bmAttributes) {
	case USB_ENDPOINT_XFER_ISOC:
		if (le16_to_cpu(desc->wMaxPacketSize) % 32)
			break;
		// fall through
	case USB_ENDPOINT_XFER_BULK:
		if (!use_dma || !ep->reg_drcmr)
			break;
		ep->dma = pxa_request_dma ((char *)_ep->name,
				(le16_to_cpu (desc->wMaxPacketSize) > 64)
					? DMA_PRIO_MEDIUM /* some iso */
					: DMA_PRIO_LOW,
				dma_nodesc_handler, ep);
		if (ep->dma >= 0) {
			*ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
			DMSG("%s using dma%d\n", _ep->name, ep->dma);
		}
	}
#endif

	DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
	return 0;
}

static int pxa2xx_ep_disable (struct usb_ep *_ep)
{
	struct pxa2xx_ep *ep;
	unsigned long flags;

	ep = container_of (_ep, struct pxa2xx_ep, ep);
	if (!_ep || !ep->desc) {
		DMSG("%s, %s not enabled\n", __FUNCTION__,
			_ep ? ep->ep.name : NULL);
		return -EINVAL;
	}
	local_irq_save(flags);

	nuke (ep, -ESHUTDOWN);

#ifdef USE_DMA
	if (ep->dma >= 0) {
		*ep->reg_drcmr = 0;
		pxa_free_dma (ep->dma);
		ep->dma = -1;
	}
#endif

	/* flush fifo (mostly for IN buffers) */
	pxa2xx_ep_fifo_flush (_ep);

	ep->desc = NULL;
	ep->stopped = 1;

	local_irq_restore(flags);
	DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
	return 0;
}

/*-------------------------------------------------------------------------*/

/* for the pxa2xx, these can just wrap kmalloc/kfree.  gadget drivers
 * must still pass correctly initialized endpoints, since other controller
 * drivers may care about how it's currently set up (dma issues etc).
 */

/*
 *	pxa2xx_ep_alloc_request - allocate a request data structure
 */
static struct usb_request *
pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct pxa2xx_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD (&req->queue);
	return &req->req;
}


/*
 *	pxa2xx_ep_free_request - deallocate a request data structure
 */
static void
pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa2xx_request *req;

	req = container_of (_req, struct pxa2xx_request, req);
	WARN_ON (!list_empty (&req->queue));
	kfree(req);
}


/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
 * no device-affinity and the heap works perfectly well for i/o buffers.
 * It wastes much less memory than dma_alloc_coherent() would, and even
 * prevents cacheline (32 bytes wide) sharing problems.
 */
static void *
pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
	dma_addr_t *dma, gfp_t gfp_flags)
{
	char *retval;

	retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
	if (retval)
#ifdef USE_DMA
		*dma = virt_to_bus (retval);
#else
		*dma = (dma_addr_t)~0;
#endif
	return retval;
}

static void
pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
		unsigned bytes)
{
	kfree (buf);
}

/*-------------------------------------------------------------------------*/

/*
 *	done - retire a request; caller blocked irqs
 */
static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
{
	unsigned stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely (req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	req->req.complete(&ep->ep, &req->req);
	ep->stopped = stopped;
}


static inline void ep0_idle (struct pxa2xx_udc *dev)
{
	dev->ep0state = EP0_IDLE;
}

static int
write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
{
	u8 *buf;
	unsigned length, count;

	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	/* how big will this packet be? */
	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	count = length;
	while (likely(count--))
		*uddr = *buf++;

	return length;
}

/*
 * write to an IN endpoint fifo, as many packets as possible.
 * irqs will use this to write the rest later.
 * caller guarantees at least one packet buffer is ready (or a zlp).
 */
static int
write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	unsigned max;

	max = le16_to_cpu(ep->desc->wMaxPacketSize);
	do {
		unsigned count;
		int is_last, is_short;

		count = write_packet(ep->reg_uddr, req, max);

		/* last packet is usually short (or a zlp) */
		if (unlikely (count != max))
			is_last = is_short = 1;
		else {
			if (likely(req->req.length != req->req.actual)
					|| req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely (max < ep->fifo_size);
		}

		DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
			ep->ep.name, count,
			is_last ? "/L" : "", is_short ? "/S" : "",
			req->req.length - req->req.actual, req);

		/* let loose that packet. maybe try writing another one,
		 * double buffering might work.  TSP, TPC, and TFS
		 * bit values are the same for all normal IN endpoints.
		 */
		*ep->reg_udccs = UDCCS_BI_TPC;
		if (is_short)
			*ep->reg_udccs = UDCCS_BI_TSP;

		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			done (ep, req, 0);
			if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
				pio_irq_disable (ep->bEndpointAddress);
#ifdef USE_DMA
				/* unaligned data and zlps couldn't use dma */
				if (unlikely(!list_empty(&ep->queue))) {
					req = list_entry(ep->queue.next,
						struct pxa2xx_request, queue);
					kick_dma(ep,req);
					return 0;
				}
#endif
			}
			return 1;
		}

		// TODO experiment: how robust can fifo mode tweaking be?
		// double buffering is off in the default fifo mode, which
		// prevents TFS from being set here.

	} while (*ep->reg_udccs & UDCCS_BI_TFS);
	return 0;
}

/* caller asserts req->pending (ep0 irq status nyet cleared); starts
 * ep0 data stage.  these chips want very simple state transitions.
 */
static inline
void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
{
	UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
	USIR0 = USIR0_IR0;
	dev->req_pending = 0;
	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
		__FUNCTION__, tag, UDCCS0, flags);
}

static int
write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	unsigned count;
	int is_short;

	count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
	ep->dev->stats.write.bytes += count;

	/* last packet "must be" short (or a zlp) */
	is_short = (count != EP0_FIFO_SIZE);

	DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
		req->req.length - req->req.actual, req);

	if (unlikely (is_short)) {
		if (ep->dev->req_pending)
			ep0start(ep->dev, UDCCS0_IPR, "short IN");
		else
			UDCCS0 = UDCCS0_IPR;

		count = req->req.length;
		done (ep, req, 0);
		ep0_idle(ep->dev);
#ifndef CONFIG_ARCH_IXP4XX
#if 1
		/* This seems to get rid of lost status irqs in some cases:
		 * host responds quickly, or next request involves config
		 * change automagic, or should have been hidden, or ...
		 *
		 * FIXME get rid of all udelays possible...
		 */
		if (count >= EP0_FIFO_SIZE) {
			count = 100;
			do {
				if ((UDCCS0 & UDCCS0_OPR) != 0) {
					/* clear OPR, generate ack */
					UDCCS0 = UDCCS0_OPR;
					break;
				}
				count--;
				udelay(1);
			} while (count);
		}
#endif
#endif
	} else if (ep->dev->req_pending)
		ep0start(ep->dev, 0, "IN");
	return is_short;
}


/*
 * read_fifo -  unload packet(s) from the fifo we use for usb OUT
 * transfers and put them into the request.  caller should have made
 * sure there's at least one packet ready.
 *
 * returns true if the request completed because of short packet or the
 * request buffer having filled (and maybe overran till end-of-packet).
 */
static int
read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	for (;;) {
		u32 udccs;
		u8 *buf;
		unsigned bufferspace, count, is_short;

		/* make sure there's a packet in the FIFO.
		 * UDCCS_{BO,IO}_RPC are all the same bit value.
		 * UDCCS_{BO,IO}_RNE are all the same bit value.
		 */
		udccs = *ep->reg_udccs;
		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
			break;
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);
		bufferspace = req->req.length - req->req.actual;

		/* read all bytes from this packet */
		if (likely (udccs & UDCCS_BO_RNE)) {
			count = 1 + (0x0ff & *ep->reg_ubcr);
			req->req.actual += min (count, bufferspace);
		} else /* zlp */
			count = 0;
		is_short = (count < ep->ep.maxpacket);
		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
			ep->ep.name, udccs, count,
			is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
		while (likely (count-- != 0)) {
			u8 byte = (u8) *ep->reg_uddr;

			if (unlikely (bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data.
				 */
				if (req->req.status != -EOVERFLOW)
					DMSG("%s overflow %d\n",
						ep->ep.name, count);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}
		*ep->reg_udccs = UDCCS_BO_RPC;
		/* RPC/RSP/RNE could now reflect the other packet buffer */

		/* iso is one request per packet */
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
			if (udccs & UDCCS_IO_ROF)
				req->req.status = -EHOSTUNREACH;
			/* more like "is_done" */
			is_short = 1;
		}

		/* completion */
		if (is_short || req->req.actual == req->req.length) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable (ep->bEndpointAddress);
			return 1;
		}

		/* finished that packet.  the next one may be waiting... */
	}
	return 0;
}

/*
 * special ep0 version of the above.  no UBCR0 or double buffering; status
 * handshaking is magic.  most device protocols don't need control-OUT.
 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
 * protocols do use them.
 */
static int
read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	u8 *buf, byte;
	unsigned bufferspace;

	buf = req->req.buf + req->req.actual;
	bufferspace = req->req.length - req->req.actual;

	while (UDCCS0 & UDCCS0_RNE) {
		byte = (u8) UDDR0;

		if (unlikely (bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				DMSG("%s overflow\n", ep->ep.name);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			req->req.actual++;
			bufferspace--;
		}
	}

	UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;

	/* completion */
	if (req->req.actual >= req->req.length)
		return 1;

	/* finished that packet.  the next one may be waiting... */
	return 0;
}

#ifdef USE_DMA

#define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)

static void
start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
{
	u32 dcmd = req->req.length;
	u32 buf = req->req.dma;
	u32 fifo = io_v2p ((u32)ep->reg_uddr);

	/* caller guarantees there's a packet or more remaining
	 *  - IN may end with a short packet (TSP set separately),
	 *  - OUT is always full length
	 */
	buf += req->req.actual;
	dcmd -= req->req.actual;
	ep->dma_fixup = 0;

	/* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
	DCSR(ep->dma) = DCSR_NODESC;
	if (is_in) {
		DSADR(ep->dma) = buf;
		DTADR(ep->dma) = fifo;
		if (dcmd > MAX_IN_DMA)
			dcmd = MAX_IN_DMA;
		else
			ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
			| DCMD_FLOWTRG | DCMD_INCSRCADDR;
	} else {
#ifdef USE_OUT_DMA
		DSADR(ep->dma) = fifo;
		DTADR(ep->dma) = buf;
		if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
			dcmd = ep->ep.maxpacket;
		dcmd |= DCMD_BURST32 | DCMD_WIDTH1
			| DCMD_FLOWSRC | DCMD_INCTRGADDR;
#endif
	}
	DCMD(ep->dma) = dcmd;
	DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
		| (unlikely(is_in)
			? DCSR_STOPIRQEN	/* use dma_nodesc_handler() */
			: 0);			/* use handle_ep() */
}

static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
{
	int is_in = ep->bEndpointAddress & USB_DIR_IN;

	if (is_in) {
		/* unaligned tx buffers and zlps only work with PIO */
		if ((req->req.dma & 0x0f) != 0
				|| unlikely((req->req.length - req->req.actual)
						== 0)) {
			pio_irq_enable(ep->bEndpointAddress);
			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
				(void) write_fifo(ep, req);
		} else {
			start_dma_nodesc(ep, req, USB_DIR_IN);
		}
	} else {
		if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
			DMSG("%s short dma read...\n", ep->ep.name);
			/* we're always set up for pio out */
			read_fifo (ep, req);
		} else {
			*ep->reg_udccs = UDCCS_BO_DME
				| (*ep->reg_udccs & UDCCS_BO_FST);
			start_dma_nodesc(ep, req, USB_DIR_OUT);
		}
	}
}

static void cancel_dma(struct pxa2xx_ep *ep)
{
	struct pxa2xx_request *req;
	u32 tmp;

	if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
		return;

	DCSR(ep->dma) = 0;
	while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
		cpu_relax();

	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
	tmp = DCMD(ep->dma) & DCMD_LENGTH;
	req->req.actual = req->req.length - (tmp & DCMD_LENGTH);

	/* the last tx packet may be incomplete, so flush the fifo.
	 * FIXME correct req.actual if we can
	 */
	if (ep->bEndpointAddress & USB_DIR_IN)
		*ep->reg_udccs = UDCCS_BI_FTF;
}

/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
static void dma_nodesc_handler(int dmach, void *_ep)
{
	struct pxa2xx_ep *ep = _ep;
	struct pxa2xx_request *req;
	u32 tmp, completed;

	local_irq_disable();

	req = list_entry(ep->queue.next, struct pxa2xx_request, queue);

	ep->dma_irqs++;
	ep->dev->stats.irqs++;
	HEX_DISPLAY(ep->dev->stats.irqs);

	/* ack/clear */
	tmp = DCSR(ep->dma);
	DCSR(ep->dma) = tmp;
	if ((tmp & DCSR_STOPSTATE) == 0
			|| (DDADR(ep->dma) & DDADR_STOP) != 0) {
		DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
			ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
		goto done;
	}
	DCSR(ep->dma) = 0;	/* clear DCSR_STOPSTATE */

	/* update transfer status */
	completed = tmp & DCSR_BUSERR;
	if (ep->bEndpointAddress & USB_DIR_IN)
		tmp = DSADR(ep->dma);
	else
		tmp = DTADR(ep->dma);
	req->req.actual = tmp - req->req.dma;

	/* FIXME seems we sometimes see partial transfers... */

	if (unlikely(completed != 0))
		req->req.status = -EIO;
	else if (req->req.actual) {
		/* these registers have zeroes in low bits; they miscount
		 * some (end-of-transfer) short packets:  tx 14 as tx 12
		 */
		if (ep->dma_fixup)
			req->req.actual = min(req->req.actual + 3,
						req->req.length);

		tmp = (req->req.length - req->req.actual);
		completed = (tmp == 0);
		if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {

			/* maybe validate final short packet ... */
			if ((req->req.actual % ep->ep.maxpacket) != 0)
				*ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;

			/* ... or zlp, using pio fallback */
			else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
					&& req->req.zero) {
				DMSG("%s zlp terminate ...\n", ep->ep.name);
				completed = 0;
			}
		}
	}

	if (likely(completed)) {
		done(ep, req, 0);

		/* maybe re-activate after completion */
		if (ep->stopped || list_empty(&ep->queue))
			goto done;
		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
	}
	kick_dma(ep, req);
done:
	local_irq_enable();
}

#endif

/*-------------------------------------------------------------------------*/

static int
pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct pxa2xx_request *req;
	struct pxa2xx_ep *ep;
	struct pxa2xx_udc *dev;
	unsigned long flags;

	req = container_of(_req, struct pxa2xx_request, req);
	if (unlikely (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))) {
		DMSG("%s, bad params\n", __FUNCTION__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -EINVAL;
	}

	dev = ep->dev;
	if (unlikely (!dev->driver
			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		DMSG("%s, bogus device state\n", __FUNCTION__);
		return -ESHUTDOWN;
	}

	/* iso is always one packet per request, that's the only way
	 * we can report per-packet status.  that also helps with dma.
	 */
	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			&& req->req.length > le16_to_cpu
						(ep->desc->wMaxPacketSize)))
		return -EMSGSIZE;

#ifdef USE_DMA
	// FIXME caller may already have done the dma mapping
	if (ep->dma >= 0) {
		_req->dma = dma_map_single(dev->dev,
			_req->buf, _req->length,
			((ep->bEndpointAddress & USB_DIR_IN) != 0)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
#endif

	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
		_ep->name, _req, _req->length, _req->buf);

	local_irq_save(flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep->desc == 0 /* ep0 */) {
			unsigned length = _req->length;

			switch (dev->ep0state) {
			case EP0_IN_DATA_PHASE:
				dev->stats.write.ops++;
				if (write_ep0_fifo(ep, req))
					req = NULL;
				break;

			case EP0_OUT_DATA_PHASE:
				dev->stats.read.ops++;
				/* messy ... */
				if (dev->req_config) {
					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
						dev->has_cfr ? "" : " raced");
					if (dev->has_cfr)
						UDCCFR = UDCCFR_AREN|UDCCFR_ACM
							|UDCCFR_MB1;
					done(ep, req, 0);
					dev->ep0state = EP0_END_XFER;
					local_irq_restore (flags);
					return 0;
				}
				if (dev->req_pending)
					ep0start(dev, UDCCS0_IPR, "OUT");
				if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
						&& read_ep0_fifo(ep, req))) {
					ep0_idle(dev);
					done(ep, req, 0);
					req = NULL;
				}
				break;

			default:
				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
				local_irq_restore (flags);
				return -EL2HLT;
			}
#ifdef USE_DMA
		/* either start dma or prime pio pump */
		} else if (ep->dma >= 0) {
			kick_dma(ep, req);
#endif
		/* can the FIFO satisfy the request immediately? */
		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
					&& write_fifo(ep, req))
				req = NULL;
		} else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
				&& read_fifo(ep, req)) {
			req = NULL;
		}

		if (likely (req && ep->desc) && ep->dma < 0)
			pio_irq_enable(ep->bEndpointAddress);
	}

	/* pio or dma irq handler advances the queue. */
	if (likely (req != 0))
		list_add_tail(&req->queue, &ep->queue);
	local_irq_restore(flags);

	return 0;
}


/*
 *	nuke - dequeue ALL requests
 */
static void nuke(struct pxa2xx_ep *ep, int status)
{
	struct pxa2xx_request *req;

	/* called with irqs blocked */
#ifdef USE_DMA
	if (ep->dma >= 0 && !ep->stopped)
		cancel_dma(ep);
#endif
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct pxa2xx_request,
				queue);
		done(ep, req, status);
	}
	if (ep->desc)
		pio_irq_disable (ep->bEndpointAddress);
}


/* dequeue JUST ONE request */
static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa2xx_ep *ep;
	struct pxa2xx_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	local_irq_save(flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		local_irq_restore(flags);
		return -EINVAL;
	}

#ifdef USE_DMA
	if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
		cancel_dma(ep);
		done(ep, req, -ECONNRESET);
		/* restart i/o */
		if (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct pxa2xx_request, queue);
			kick_dma(ep, req);
		}
	} else
#endif
		done(ep, req, -ECONNRESET);

	local_irq_restore(flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct pxa2xx_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (unlikely (!_ep
			|| (!ep->desc && ep->ep.name != ep0name))
			|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -EINVAL;
	}
	if (value == 0) {
		/* this path (reset toggle+halt) is needed to implement
		 * SET_INTERFACE on normal hardware.  but it can't be
		 * done from software on the PXA UDC, and the hardware
		 * forgets to do it as part of SET_INTERFACE automagic.
		 */
		DMSG("only host can clear %s halt\n", _ep->name);
		return -EROFS;
	}

	local_irq_save(flags);

	if ((ep->bEndpointAddress & USB_DIR_IN) != 0
			&& ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
			   || !list_empty(&ep->queue))) {
		local_irq_restore(flags);
		return -EAGAIN;
	}

	/* FST bit is the same for control, bulk in, bulk out, interrupt in */
	*ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;

	/* ep0 needs special care */
	if (!ep->desc) {
		start_watchdog(ep->dev);
		ep->dev->req_pending = 0;
		ep->dev->ep0state = EP0_STALL;

	/* and bulk/intr endpoints like dropping stalls too */
	} else {
		unsigned i;
		for (i = 0; i < 1000; i += 20) {
			if (*ep->reg_udccs & UDCCS_BI_SST)
				break;
			udelay(20);
		}
	}
	local_irq_restore(flags);

	DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
	return 0;
}

static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
{
	struct pxa2xx_ep *ep;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return -ENODEV;
	}
	/* pxa can't report unclaimed bytes from IN fifos */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
		return -EOPNOTSUPP;
	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
			|| (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
		return 0;
	else
		return (*ep->reg_ubcr & 0xfff) + 1;
}

static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
{
	struct pxa2xx_ep *ep;

	ep = container_of(_ep, struct pxa2xx_ep, ep);
	if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
		DMSG("%s, bad ep\n", __FUNCTION__);
		return;
	}

	/* toggle and halt bits stay unchanged */

	/* for OUT, just read and discard the FIFO contents. */
	if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
		while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
			(void) *ep->reg_uddr;
		return;
	}

	/* most IN status is the same, but ISO can't stall */
	*ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
		| ((ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
			? 0 : UDCCS_BI_SST);
}


static struct usb_ep_ops pxa2xx_ep_ops = {
	.enable		= pxa2xx_ep_enable,
	.disable	= pxa2xx_ep_disable,

	.alloc_request	= pxa2xx_ep_alloc_request,
	.free_request	= pxa2xx_ep_free_request,

	.alloc_buffer	= pxa2xx_ep_alloc_buffer,
	.free_buffer	= pxa2xx_ep_free_buffer,

	.queue		= pxa2xx_ep_queue,
	.dequeue	= pxa2xx_ep_dequeue,

	.set_halt	= pxa2xx_ep_set_halt,
	.fifo_status	= pxa2xx_ep_fifo_status,
	.fifo_flush	= pxa2xx_ep_fifo_flush,
};


/* ---------------------------------------------------------------------------
 *	device-scoped parts of the api to the usb controller hardware
 * ---------------------------------------------------------------------------
 */

static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
{
	return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
}

static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
{
	/* host may not have enabled remote wakeup */
	if ((UDCCS0 & UDCCS0_DRWF) == 0)
		return -EHOSTUNREACH;
	udc_set_mask_UDCCR(UDCCR_RSM);
	return 0;
}

static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
static void udc_enable (struct pxa2xx_udc *);
static void udc_disable(struct pxa2xx_udc *);

/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
 * in active use.
 */
static int pullup(struct pxa2xx_udc *udc, int is_active)
{
	is_active = is_active && udc->vbus && udc->pullup;
	DMSG("%s\n", is_active ? "active" : "inactive");
	if (is_active)
		udc_enable(udc);
	else {
		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
			DMSG("disconnect %s\n", udc->driver
				? udc->driver->driver.name
				: "(no driver)");
			stop_activity(udc, udc->driver);
		}
		udc_disable(udc);
	}
	return 0;
}

/* VBUS reporting logically comes from a transceiver */
static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct pxa2xx_udc *udc;

	udc = container_of(_gadget, struct pxa2xx_udc, gadget);
	udc->vbus = is_active = (is_active != 0);
	DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
	pullup(udc, is_active);
	return 0;
}

/* drivers may have software control over D+ pullup */
static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
	struct pxa2xx_udc *udc;

	udc = container_of(_gadget, struct pxa2xx_udc, gadget);

	/* not all boards support pullup control */
	if (!udc->mach->udc_command)
		return -EOPNOTSUPP;

	is_active = (is_active != 0);
	udc->pullup = is_active;
	pullup(udc, is_active);
	return 0;
}

static const struct usb_gadget_ops pxa2xx_udc_ops = {
	.get_frame	= pxa2xx_udc_get_frame,
	.wakeup		= pxa2xx_udc_wakeup,
	.vbus_session	= pxa2xx_udc_vbus_session,
	.pullup		= pxa2xx_udc_pullup,

	// .vbus_draw ... boards may consume current from VBUS, up to
	// 100-500mA based on config.  the 500uA suspend ceiling means
	// that exclusively vbus-powered PXA designs violate USB specs.
};

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

static const char proc_node_name [] = "driver/udc";

static int
udc_proc_read(char *page, char **start, off_t off, int count,
		int *eof, void *_dev)
{
	char *buf = page;
	struct pxa2xx_udc *dev = _dev;
	char *next = buf;
	unsigned size = count;
	unsigned long flags;
	int i, t;
	u32 tmp;

	if (off != 0)
		return 0;

	local_irq_save(flags);

	/* basic device status */
	t = scnprintf(next, size, DRIVER_DESC "\n"
		"%s version: %s\nGadget driver: %s\nHost %s\n\n",
		driver_name, DRIVER_VERSION SIZE_STR DMASTR,
		dev->driver ? dev->driver->driver.name : "(none)",
		is_vbus_present() ? "full speed" : "disconnected");
	size -= t;
	next += t;

	/* registers for device and ep0 */
	t = scnprintf(next, size,
		"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
	size -= t;
	next += t;

	tmp = UDCCR;
	t = scnprintf(next, size,
		"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCR_REM) ? " rem" : "",
		(tmp & UDCCR_RSTIR) ? " rstir" : "",
		(tmp & UDCCR_SRM) ? " srm" : "",
		(tmp & UDCCR_SUSIR) ? " susir" : "",
		(tmp & UDCCR_RESIR) ? " resir" : "",
		(tmp & UDCCR_RSM) ? " rsm" : "",
		(tmp & UDCCR_UDA) ? " uda" : "",
		(tmp & UDCCR_UDE) ? " ude" : "");
	size -= t;
	next += t;

	tmp = UDCCS0;
	t = scnprintf(next, size,
		"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCS0_SA) ? " sa" : "",
		(tmp & UDCCS0_RNE) ? " rne" : "",
		(tmp & UDCCS0_FST) ? " fst" : "",
		(tmp & UDCCS0_SST) ? " sst" : "",
		(tmp & UDCCS0_DRWF) ? " drwf" : "",
		(tmp & UDCCS0_FTF) ? " ftf" : "",
		(tmp & UDCCS0_IPR) ? " ipr" : "",
		(tmp & UDCCS0_OPR) ? " opr" : "");
	size -= t;
	next += t;

	if (dev->has_cfr) {
		tmp = UDCCFR;
		t = scnprintf(next, size,
			"udccfr %02X =%s%s\n", tmp,
			(tmp & UDCCFR_AREN) ? " aren" : "",
			(tmp & UDCCFR_ACM) ? " acm" : "");
		size -= t;
		next += t;
	}

	if (!is_vbus_present() || !dev->driver)
		goto done;

	t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
		dev->stats.write.bytes, dev->stats.write.ops,
		dev->stats.read.bytes, dev->stats.read.ops,
		dev->stats.irqs);
	size -= t;
	next += t;

	/* dump endpoint queues */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep *ep = &dev->ep [i];
		struct pxa2xx_request *req;
		int t;

		if (i != 0) {
			const struct usb_endpoint_descriptor *d;

			d = ep->desc;
			if (!d)
				continue;
			tmp = *dev->ep [i].reg_udccs;
			t = scnprintf(next, size,
				"%s max %d %s udccs %02x irqs %lu/%lu\n",
				ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
				(ep->dma >= 0) ? "dma" : "pio", tmp,
				ep->pio_irqs, ep->dma_irqs);
			/* TODO translate all five groups of udccs bits! */

		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
				ep->pio_irqs);
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
#ifdef USE_DMA
			if (ep->dma >= 0 && req->queue.prev == &ep->queue)
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dma%d dcmd %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					ep->dma, DCMD(ep->dma)
					// low 13 bits == bytes-to-go
					);
			else
#endif
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
		}
	}

done:
	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}

#define create_proc_files() \
	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
#define remove_proc_files() \
	remove_proc_entry(proc_node_name, NULL)

#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */

#define create_proc_files() do {} while (0)
#define remove_proc_files() do {} while (0)

#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */

/* "function" sysfs attribute */
static ssize_t
show_function (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct pxa2xx_udc *dev = dev_get_drvdata (_dev);

	if (!dev->driver
			|| !dev->driver->function
			|| strlen (dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);

/*-------------------------------------------------------------------------*/

/*
 *	udc_disable - disable USB device controller
 */
static void udc_disable(struct pxa2xx_udc *dev)
{
	/* block all irqs */
	udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
	UICR0 = UICR1 = 0xff;
	UFNRH = UFNRH_SIM;

	/* if hardware supports it, disconnect from usb */
	pullup_off();

	udc_clear_mask_UDCCR(UDCCR_UDE);

#ifdef CONFIG_ARCH_PXA
	/* Disable clock for USB device */
	pxa_set_cken(CKEN11_USB, 0);
#endif

	ep0_idle (dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	LED_CONNECTED_OFF;
}


/*
 *	udc_reinit - initialize software state
 */
static void udc_reinit(struct pxa2xx_udc *dev)
{
	u32 i;

	/* device/ep0 records init */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
	dev->ep0state = EP0_IDLE;

	/* basic endpoint records init */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep *ep = &dev->ep[i];

		if (i != 0)
			list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->desc = NULL;
		ep->stopped = 0;
		INIT_LIST_HEAD (&ep->queue);
		ep->pio_irqs = ep->dma_irqs = 0;
	}

	/* the rest was statically initialized, and is read-only */
}

/* until it's enabled, this UDC should be completely invisible
 * to any USB host.
 */
static void udc_enable (struct pxa2xx_udc *dev)
{
	udc_clear_mask_UDCCR(UDCCR_UDE);

#ifdef CONFIG_ARCH_PXA
	/* Enable clock for USB device */
	pxa_set_cken(CKEN11_USB, 1);
	udelay(5);
#endif

	/* try to clear these bits before we enable the udc */
	udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);

	ep0_idle(dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->stats.irqs = 0;

	/*
	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
	 * - enable UDC
	 * - if RESET is already in progress, ack interrupt
	 * - unmask reset interrupt
	 */
	udc_set_mask_UDCCR(UDCCR_UDE);
	if (!(UDCCR & UDCCR_UDA))
		udc_ack_int_UDCCR(UDCCR_RSTIR);

	if (dev->has_cfr /* UDC_RES2 is defined */) {
		/* pxa255 (a0+) can avoid a set_config race that could
		 * prevent gadget drivers from configuring correctly
		 */
		UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
	} else {
		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
		 * which could result in missing packets and interrupts.
		 * supposedly one bit per endpoint, controlling whether it
		 * double buffers or not; ACM/AREN bits fit into the holes.
		 * zero bits (like USIR0_IRx) disable double buffering.
		 */
		UDC_RES1 = 0x00;
		UDC_RES2 = 0x00;
	}

#ifdef DISABLE_TEST_MODE
	/* "test mode" seems to have become the default in later chip
	 * revs, preventing double buffering (and invalidating docs).
	 * this EXPERIMENT enables it for bulk endpoints by tweaking
	 * undefined/reserved register bits (that other drivers clear).
	 * Belcarra code comments noted this usage.
	 */
	if (fifo_mode & 1) {	/* IN endpoints */
		UDC_RES1 |= USIR0_IR1|USIR0_IR6;
		UDC_RES2 |= USIR1_IR11;
	}
	if (fifo_mode & 2) {	/* OUT endpoints */
		UDC_RES1 |= USIR0_IR2|USIR0_IR7;
		UDC_RES2 |= USIR1_IR12;
	}
#endif

	/* enable suspend/resume and reset irqs */
	udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);

	/* enable ep0 irqs */
	UICR0 &= ~UICR0_IM0;

	/* if hardware supports it, pullup D+ and wait for reset */
	pullup_on();
}


/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct pxa2xx_udc *dev = the_controller;
	int retval;

	if (!driver
			|| driver->speed < USB_SPEED_FULL
			|| !driver->bind
			|| !driver->unbind
			|| !driver->disconnect
			|| !driver->setup)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	/* first hook up the driver ... */
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;
	dev->pullup = 1;

	device_add (&dev->gadget.dev);
	retval = driver->bind(&dev->gadget);
	if (retval) {
		DMSG("bind to driver %s --> error %d\n",
				driver->driver.name, retval);
		device_del (&dev->gadget.dev);

		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}
	device_create_file(dev->dev, &dev_attr_function);

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	DMSG("registered gadget driver '%s'\n", driver->driver.name);
	pullup(dev, 1);
	dump_state(dev);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);

static void
stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect drivers more than once */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* prevent new request submissions, kill any outstanding requests */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa2xx_ep *ep = &dev->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	del_timer_sync(&dev->timer);

	/* report disconnect; the driver is already quiesced */
	LED_CONNECTED_OFF;
	if (driver)
		driver->disconnect(&dev->gadget);

	/* re-init driver-visible data structures */
	udc_reinit(dev);
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct pxa2xx_udc *dev = the_controller;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver)
		return -EINVAL;

	local_irq_disable();
	pullup(dev, 0);
	stop_activity(dev, driver);
	local_irq_enable();

	driver->unbind(&dev->gadget);
	dev->driver = NULL;

	device_del (&dev->gadget.dev);
	device_remove_file(dev->dev, &dev_attr_function);

	DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
	dump_state(dev);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);


/*-------------------------------------------------------------------------*/

#ifdef CONFIG_ARCH_LUBBOCK

/* Lubbock has separate connect and disconnect irqs.  More typical designs
 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
 */

static irqreturn_t
lubbock_vbus_irq(int irq, void *_dev)
{
	struct pxa2xx_udc *dev = _dev;
	int vbus;

	dev->stats.irqs++;
	HEX_DISPLAY(dev->stats.irqs);
	switch (irq) {
	case LUBBOCK_USB_IRQ:
		LED_CONNECTED_ON;
		vbus = 1;
		disable_irq(LUBBOCK_USB_IRQ);
		enable_irq(LUBBOCK_USB_DISC_IRQ);
		break;
	case LUBBOCK_USB_DISC_IRQ:
		LED_CONNECTED_OFF;
		vbus = 0;
		disable_irq(LUBBOCK_USB_DISC_IRQ);
		enable_irq(LUBBOCK_USB_IRQ);
		break;
	default:
		return IRQ_NONE;
	}

	pxa2xx_udc_vbus_session(&dev->gadget, vbus);
	return IRQ_HANDLED;
}

#endif

static irqreturn_t udc_vbus_irq(int irq, void *_dev)
{
	struct pxa2xx_udc *dev = _dev;
	int vbus = pxa_gpio_get(dev->mach->gpio_vbus);

	pxa2xx_udc_vbus_session(&dev->gadget, vbus);
	return IRQ_HANDLED;
}


/*-------------------------------------------------------------------------*/

static inline void clear_ep_state (struct pxa2xx_udc *dev)
{
	unsigned i;

	/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
	 * fifos, and pending transactions mustn't be continued in any case.
	 */
	for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
		nuke(&dev->ep[i], -ECONNABORTED);
}

static void udc_watchdog(unsigned long _dev)
{
	struct pxa2xx_udc *dev = (void *)_dev;

	local_irq_disable();
	if (dev->ep0state == EP0_STALL
			&& (UDCCS0 & UDCCS0_FST) == 0
			&& (UDCCS0 & UDCCS0_SST) == 0) {
		UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
		DBG(DBG_VERBOSE, "ep0 re-stall\n");
		start_watchdog(dev);
	}
	local_irq_enable();
}

static void handle_ep0 (struct pxa2xx_udc *dev)
{
	u32 udccs0 = UDCCS0;
	struct pxa2xx_ep *ep = &dev->ep [0];
	struct pxa2xx_request *req;
	union {
		struct usb_ctrlrequest	r;
		u8			raw [8];
		u32			word [2];
	} u;

	if (list_empty(&ep->queue))
		req = NULL;
	else
		req = list_entry(ep->queue.next, struct pxa2xx_request, queue);

	/* clear stall status */
	if (udccs0 & UDCCS0_SST) {
		nuke(ep, -EPIPE);
		UDCCS0 = UDCCS0_SST;
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	/* previous request unfinished?  non-error iff back-to-back ... */
	if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
		nuke(ep, 0);
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	switch (dev->ep0state) {
	case EP0_IDLE:
		/* late-breaking status? */
		udccs0 = UDCCS0;

		/* start control request? */
		if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
				== (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
			int i;

			nuke (ep, -EPROTO);

			/* read SETUP packet */
			for (i = 0; i < 8; i++) {
				if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
bad_setup:
					DMSG("SETUP %d!\n", i);
					goto stall;
				}
				u.raw [i] = (u8) UDDR0;
			}
			if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
				goto bad_setup;

got_setup:
			DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				le16_to_cpu(u.r.wValue),
				le16_to_cpu(u.r.wIndex),
				le16_to_cpu(u.r.wLength));

			/* cope with automagic for some standard requests. */
			dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
						== USB_TYPE_STANDARD;
			dev->req_config = 0;
			dev->req_pending = 1;
			switch (u.r.bRequest) {
			/* hardware restricts gadget drivers here! */
			case USB_REQ_SET_CONFIGURATION:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					/* reflect hardware's automagic
					 * up to the gadget driver.
					 */
config_change:
					dev->req_config = 1;
					clear_ep_state(dev);
					/* if !has_cfr, there's no synch
					 * else use AREN (later) not SA|OPR
					 * USIR0_IR0 acts edge sensitive
					 */
				}
				break;
			/* ... and here, even more ... */
			case USB_REQ_SET_INTERFACE:
				if (u.r.bRequestType == USB_RECIP_INTERFACE) {
					/* udc hardware is broken by design:
					 *  - altsetting may only be zero;
					 *  - hw resets all interfaces' eps;
					 *  - ep reset doesn't include halt(?).
					 */
					DMSG("broken set_interface (%d/%d)\n",
						le16_to_cpu(u.r.wIndex),
						le16_to_cpu(u.r.wValue));
					goto config_change;
				}
				break;
			/* hardware was supposed to hide this */
			case USB_REQ_SET_ADDRESS:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					ep0start(dev, 0, "address");
					return;
				}
				break;
			}

			if (u.r.bRequestType & USB_DIR_IN)
				dev->ep0state = EP0_IN_DATA_PHASE;
			else
				dev->ep0state = EP0_OUT_DATA_PHASE;

			i = dev->driver->setup(&dev->gadget, &u.r);
			if (i < 0) {
				/* hardware automagic preventing STALL... */
				if (dev->req_config) {
					/* hardware sometimes neglects to
					 * tell us about config change events,
					 * so later ones may fail...
					 */
1914 WARN("config change %02x fail %d?\n",
1915 u.r.bRequest, i);
1916 return;
1917 /* TODO experiment: if has_cfr,
1918 * hardware didn't ACK; maybe we
1919 * could actually STALL!
1920 */
1921 }
1922 DBG(DBG_VERBOSE, "protocol STALL, "
1923 "%02x err %d\n", UDCCS0, i);
1924stall:
1925 /* the watchdog timer helps deal with cases
1926 * where udc seems to clear FST wrongly, and
1927 * then NAKs instead of STALLing.
1928 */
1929 ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
1930 start_watchdog(dev);
1931 dev->ep0state = EP0_STALL;
1932
1933 /* deferred i/o == no response yet */
1934 } else if (dev->req_pending) {
1935 if (likely(dev->ep0state == EP0_IN_DATA_PHASE
1936 || dev->req_std || u.r.wLength))
1937 ep0start(dev, 0, "defer");
1938 else
1939 ep0start(dev, UDCCS0_IPR, "defer/IPR");
1940 }
1941
1942 /* expect at least one data or status stage irq */
1943 return;
1944
1945 } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
1946 == (UDCCS0_OPR|UDCCS0_SA))) {
1947 unsigned i;
1948
1949 /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
1950 * still observed on a pxa255 a0.
1951 */
1952 DBG(DBG_VERBOSE, "e131\n");
1953 nuke(ep, -EPROTO);
1954
1955 /* read SETUP data, but don't trust it too much */
1956 for (i = 0; i < 8; i++)
1957 u.raw [i] = (u8) UDDR0;
1958 if ((u.r.bRequestType & USB_RECIP_MASK)
1959 > USB_RECIP_OTHER)
1960 goto stall;
1961 if (u.word [0] == 0 && u.word [1] == 0)
1962 goto stall;
1963 goto got_setup;
1964 } else {
1965 /* some random early IRQ:
1966 * - we acked FST
1967 * - IPR cleared
1968 * - OPR got set, without SA (likely status stage)
1969 */
1970 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
1971 }
1972 break;
1973 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1974 if (udccs0 & UDCCS0_OPR) {
1975 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
1976 DBG(DBG_VERBOSE, "ep0in premature status\n");
1977 if (req)
1978 done(ep, req, 0);
1979 ep0_idle(dev);
1980 } else /* irq was IPR clearing */ {
1981 if (req) {
1982 /* this IN packet might finish the request */
1983 (void) write_ep0_fifo(ep, req);
1984 } /* else IN token before response was written */
1985 }
1986 break;
1987 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
1988 if (udccs0 & UDCCS0_OPR) {
1989 if (req) {
1990 /* this OUT packet might finish the request */
1991 if (read_ep0_fifo(ep, req))
1992 done(ep, req, 0);
1993 /* else more OUT packets expected */
1994 } /* else OUT token before read was issued */
1995 } else /* irq was IPR clearing */ {
1996 DBG(DBG_VERBOSE, "ep0out premature status\n");
1997 if (req)
1998 done(ep, req, 0);
1999 ep0_idle(dev);
2000 }
2001 break;
2002 case EP0_END_XFER:
2003 if (req)
2004 done(ep, req, 0);
2005 /* ack control-IN status (maybe in-zlp was skipped)
2006 * also appears after some config change events.
2007 */
2008 if (udccs0 & UDCCS0_OPR)
2009 UDCCS0 = UDCCS0_OPR;
2010 ep0_idle(dev);
2011 break;
2012 case EP0_STALL:
2013 UDCCS0 = UDCCS0_FST;
2014 break;
2015 }
2016 USIR0 = USIR0_IR0;
2017}
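/* Summary of the ep0 state machine driven above:
 *
 *   EP0_IDLE --(SETUP, SA|OPR)--> driver->setup()
 *       setup() < 0 ............ EP0_STALL (watchdog recovers bogus FST clears)
 *       bRequestType & USB_DIR_IN  --> EP0_IN_DATA_PHASE
 *       otherwise .................. EP0_OUT_DATA_PHASE
 *   Data phases return to EP0_IDLE when the request completes or a
 *   premature OPR (status stage) arrives; EP0_END_XFER acks a skipped
 *   in-zlp/status stage after config changes.
 */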
2018
2019static void handle_ep(struct pxa2xx_ep *ep)
2020{
2021 struct pxa2xx_request *req;
2022 int is_in = ep->bEndpointAddress & USB_DIR_IN;
2023 int completed;
2024 u32 udccs, tmp;
2025
2026 do {
2027 completed = 0;
2028 if (likely (!list_empty(&ep->queue)))
2029 req = list_entry(ep->queue.next,
2030 struct pxa2xx_request, queue);
2031 else
2032 req = NULL;
2033
2034 // TODO check FST handling
2035
2036 udccs = *ep->reg_udccs;
2037 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
2038 tmp = UDCCS_BI_TUR;
2039 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2040 tmp |= UDCCS_BI_SST;
2041 tmp &= udccs;
2042 if (likely (tmp))
2043 *ep->reg_udccs = tmp;
2044 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
2045 completed = write_fifo(ep, req);
2046
2047 } else { /* irq from RPC (or for ISO, ROF) */
2048 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2049 tmp = UDCCS_BO_SST | UDCCS_BO_DME;
2050 else
2051 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
2052 tmp &= udccs;
2053 if (likely(tmp))
2054 *ep->reg_udccs = tmp;
2055
2056 /* fifos can hold packets, ready for reading... */
2057 if (likely(req)) {
2058#ifdef USE_OUT_DMA
2059// TODO didn't yet debug out-dma. this approach assumes
2060// the worst about short packets and RPC; it might be better.
2061
2062 if (likely(ep->dma >= 0)) {
2063 if (!(udccs & UDCCS_BO_RSP)) {
2064 *ep->reg_udccs = UDCCS_BO_RPC;
2065 ep->dma_irqs++;
2066 return;
2067 }
2068 }
2069#endif
2070 completed = read_fifo(ep, req);
2071 } else
2072 pio_irq_disable (ep->bEndpointAddress);
2073 }
2074 ep->pio_irqs++;
2075 } while (completed);
2076}
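/* From the gadget driver's side, the loop above is what eventually
 * completes requests queued through the standard gadget API; a minimal
 * sketch (buf, len and my_complete are illustrative names only):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// invoked from done() after the fifo work
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * handle_ep() then moves data with write_fifo()/read_fifo() on each
 * endpoint interrupt until the request is done.
 */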
2077
2078/*
2079 * pxa2xx_udc_irq - interrupt handler
2080 *
2081 * avoid delays in ep0 processing. the control handshaking isn't always
2082 * under software control (pxa250c0 and the pxa255 are better), and delays
2083 * could cause usb protocol errors.
2084 */
2085static irqreturn_t
 2086pxa2xx_udc_irq(int irq, void *_dev)
2087{
2088 struct pxa2xx_udc *dev = _dev;
2089 int handled;
2090
2091 dev->stats.irqs++;
2092 HEX_DISPLAY(dev->stats.irqs);
2093 do {
2094 u32 udccr = UDCCR;
2095
2096 handled = 0;
2097
2098 /* SUSpend Interrupt Request */
2099 if (unlikely(udccr & UDCCR_SUSIR)) {
2100 udc_ack_int_UDCCR(UDCCR_SUSIR);
2101 handled = 1;
 2102			DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
2103 ? "" : "+disconnect");
2104
 2105			if (!is_vbus_present())
2106 stop_activity(dev, dev->driver);
2107 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2108 && dev->driver
2109 && dev->driver->suspend)
2110 dev->driver->suspend(&dev->gadget);
2111 ep0_idle (dev);
2112 }
2113
2114 /* RESume Interrupt Request */
2115 if (unlikely(udccr & UDCCR_RESIR)) {
2116 udc_ack_int_UDCCR(UDCCR_RESIR);
2117 handled = 1;
2118 DBG(DBG_VERBOSE, "USB resume\n");
2119
2120 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2121 && dev->driver
2122 && dev->driver->resume
 2123					&& is_vbus_present())
2124 dev->driver->resume(&dev->gadget);
2125 }
2126
2127 /* ReSeT Interrupt Request - USB reset */
2128 if (unlikely(udccr & UDCCR_RSTIR)) {
2129 udc_ack_int_UDCCR(UDCCR_RSTIR);
2130 handled = 1;
2131
2132 if ((UDCCR & UDCCR_UDA) == 0) {
2133 DBG(DBG_VERBOSE, "USB reset start\n");
2134
2135 /* reset driver and endpoints,
2136 * in case that's not yet done
2137 */
2138 stop_activity (dev, dev->driver);
2139
2140 } else {
2141 DBG(DBG_VERBOSE, "USB reset end\n");
2142 dev->gadget.speed = USB_SPEED_FULL;
2143 LED_CONNECTED_ON;
2144 memset(&dev->stats, 0, sizeof dev->stats);
2145 /* driver and endpoints are still reset */
2146 }
2147
2148 } else {
2149 u32 usir0 = USIR0 & ~UICR0;
2150 u32 usir1 = USIR1 & ~UICR1;
2151 int i;
2152
2153 if (unlikely (!usir0 && !usir1))
2154 continue;
2155
2156 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2157
2158 /* control traffic */
2159 if (usir0 & USIR0_IR0) {
2160 dev->ep[0].pio_irqs++;
2161 handle_ep0(dev);
2162 handled = 1;
2163 }
2164
2165 /* endpoint data transfers */
2166 for (i = 0; i < 8; i++) {
2167 u32 tmp = 1 << i;
2168
2169 if (i && (usir0 & tmp)) {
2170 handle_ep(&dev->ep[i]);
2171 USIR0 |= tmp;
2172 handled = 1;
2173 }
2174 if (usir1 & tmp) {
2175 handle_ep(&dev->ep[i+8]);
2176 USIR1 |= tmp;
2177 handled = 1;
2178 }
2179 }
2180 }
2181
2182 /* we could also ask for 1 msec SOF (SIR) interrupts */
2183
2184 } while (handled);
2185 return IRQ_HANDLED;
2186}
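/* Dispatch summary for the handler above: UDCCR-level events (suspend,
 * resume, reset) are serviced first; otherwise USIR0/USIR1 are sampled,
 * masked by UICR0/UICR1, and bit i of USIR0 maps to ep[i] (bit 0 being
 * ep0) while bit i of USIR1 maps to ep[i+8].  The loop repeats until a
 * full pass finds nothing left to handle.
 */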
2187
2188/*-------------------------------------------------------------------------*/
2189
2190static void nop_release (struct device *dev)
2191{
2192 DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
2193}
2194
2195/* this uses load-time allocation and initialization (instead of
2196 * doing it at run-time) to save code, eliminate fault paths, and
2197 * be more obviously correct.
2198 */
2199static struct pxa2xx_udc memory = {
2200 .gadget = {
2201 .ops = &pxa2xx_udc_ops,
2202 .ep0 = &memory.ep[0].ep,
2203 .name = driver_name,
2204 .dev = {
2205 .bus_id = "gadget",
2206 .release = nop_release,
2207 },
2208 },
2209
2210 /* control endpoint */
2211 .ep[0] = {
2212 .ep = {
2213 .name = ep0name,
2214 .ops = &pxa2xx_ep_ops,
2215 .maxpacket = EP0_FIFO_SIZE,
2216 },
2217 .dev = &memory,
2218 .reg_udccs = &UDCCS0,
2219 .reg_uddr = &UDDR0,
2220 },
2221
2222 /* first group of endpoints */
2223 .ep[1] = {
2224 .ep = {
2225 .name = "ep1in-bulk",
2226 .ops = &pxa2xx_ep_ops,
2227 .maxpacket = BULK_FIFO_SIZE,
2228 },
2229 .dev = &memory,
2230 .fifo_size = BULK_FIFO_SIZE,
2231 .bEndpointAddress = USB_DIR_IN | 1,
2232 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2233 .reg_udccs = &UDCCS1,
2234 .reg_uddr = &UDDR1,
2235 drcmr (25)
2236 },
2237 .ep[2] = {
2238 .ep = {
2239 .name = "ep2out-bulk",
2240 .ops = &pxa2xx_ep_ops,
2241 .maxpacket = BULK_FIFO_SIZE,
2242 },
2243 .dev = &memory,
2244 .fifo_size = BULK_FIFO_SIZE,
2245 .bEndpointAddress = 2,
2246 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2247 .reg_udccs = &UDCCS2,
2248 .reg_ubcr = &UBCR2,
2249 .reg_uddr = &UDDR2,
2250 drcmr (26)
2251 },
2252#ifndef CONFIG_USB_PXA2XX_SMALL
2253 .ep[3] = {
2254 .ep = {
2255 .name = "ep3in-iso",
2256 .ops = &pxa2xx_ep_ops,
2257 .maxpacket = ISO_FIFO_SIZE,
2258 },
2259 .dev = &memory,
2260 .fifo_size = ISO_FIFO_SIZE,
2261 .bEndpointAddress = USB_DIR_IN | 3,
2262 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2263 .reg_udccs = &UDCCS3,
2264 .reg_uddr = &UDDR3,
2265 drcmr (27)
2266 },
2267 .ep[4] = {
2268 .ep = {
2269 .name = "ep4out-iso",
2270 .ops = &pxa2xx_ep_ops,
2271 .maxpacket = ISO_FIFO_SIZE,
2272 },
2273 .dev = &memory,
2274 .fifo_size = ISO_FIFO_SIZE,
2275 .bEndpointAddress = 4,
2276 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2277 .reg_udccs = &UDCCS4,
2278 .reg_ubcr = &UBCR4,
2279 .reg_uddr = &UDDR4,
2280 drcmr (28)
2281 },
2282 .ep[5] = {
2283 .ep = {
2284 .name = "ep5in-int",
2285 .ops = &pxa2xx_ep_ops,
2286 .maxpacket = INT_FIFO_SIZE,
2287 },
2288 .dev = &memory,
2289 .fifo_size = INT_FIFO_SIZE,
2290 .bEndpointAddress = USB_DIR_IN | 5,
2291 .bmAttributes = USB_ENDPOINT_XFER_INT,
2292 .reg_udccs = &UDCCS5,
2293 .reg_uddr = &UDDR5,
2294 },
2295
2296 /* second group of endpoints */
2297 .ep[6] = {
2298 .ep = {
2299 .name = "ep6in-bulk",
2300 .ops = &pxa2xx_ep_ops,
2301 .maxpacket = BULK_FIFO_SIZE,
2302 },
2303 .dev = &memory,
2304 .fifo_size = BULK_FIFO_SIZE,
2305 .bEndpointAddress = USB_DIR_IN | 6,
2306 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2307 .reg_udccs = &UDCCS6,
2308 .reg_uddr = &UDDR6,
2309 drcmr (30)
2310 },
2311 .ep[7] = {
2312 .ep = {
2313 .name = "ep7out-bulk",
2314 .ops = &pxa2xx_ep_ops,
2315 .maxpacket = BULK_FIFO_SIZE,
2316 },
2317 .dev = &memory,
2318 .fifo_size = BULK_FIFO_SIZE,
2319 .bEndpointAddress = 7,
2320 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2321 .reg_udccs = &UDCCS7,
2322 .reg_ubcr = &UBCR7,
2323 .reg_uddr = &UDDR7,
2324 drcmr (31)
2325 },
2326 .ep[8] = {
2327 .ep = {
2328 .name = "ep8in-iso",
2329 .ops = &pxa2xx_ep_ops,
2330 .maxpacket = ISO_FIFO_SIZE,
2331 },
2332 .dev = &memory,
2333 .fifo_size = ISO_FIFO_SIZE,
2334 .bEndpointAddress = USB_DIR_IN | 8,
2335 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2336 .reg_udccs = &UDCCS8,
2337 .reg_uddr = &UDDR8,
2338 drcmr (32)
2339 },
2340 .ep[9] = {
2341 .ep = {
2342 .name = "ep9out-iso",
2343 .ops = &pxa2xx_ep_ops,
2344 .maxpacket = ISO_FIFO_SIZE,
2345 },
2346 .dev = &memory,
2347 .fifo_size = ISO_FIFO_SIZE,
2348 .bEndpointAddress = 9,
2349 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2350 .reg_udccs = &UDCCS9,
2351 .reg_ubcr = &UBCR9,
2352 .reg_uddr = &UDDR9,
2353 drcmr (33)
2354 },
2355 .ep[10] = {
2356 .ep = {
2357 .name = "ep10in-int",
2358 .ops = &pxa2xx_ep_ops,
2359 .maxpacket = INT_FIFO_SIZE,
2360 },
2361 .dev = &memory,
2362 .fifo_size = INT_FIFO_SIZE,
2363 .bEndpointAddress = USB_DIR_IN | 10,
2364 .bmAttributes = USB_ENDPOINT_XFER_INT,
2365 .reg_udccs = &UDCCS10,
2366 .reg_uddr = &UDDR10,
2367 },
2368
2369 /* third group of endpoints */
2370 .ep[11] = {
2371 .ep = {
2372 .name = "ep11in-bulk",
2373 .ops = &pxa2xx_ep_ops,
2374 .maxpacket = BULK_FIFO_SIZE,
2375 },
2376 .dev = &memory,
2377 .fifo_size = BULK_FIFO_SIZE,
2378 .bEndpointAddress = USB_DIR_IN | 11,
2379 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2380 .reg_udccs = &UDCCS11,
2381 .reg_uddr = &UDDR11,
2382 drcmr (35)
2383 },
2384 .ep[12] = {
2385 .ep = {
2386 .name = "ep12out-bulk",
2387 .ops = &pxa2xx_ep_ops,
2388 .maxpacket = BULK_FIFO_SIZE,
2389 },
2390 .dev = &memory,
2391 .fifo_size = BULK_FIFO_SIZE,
2392 .bEndpointAddress = 12,
2393 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2394 .reg_udccs = &UDCCS12,
2395 .reg_ubcr = &UBCR12,
2396 .reg_uddr = &UDDR12,
2397 drcmr (36)
2398 },
2399 .ep[13] = {
2400 .ep = {
2401 .name = "ep13in-iso",
2402 .ops = &pxa2xx_ep_ops,
2403 .maxpacket = ISO_FIFO_SIZE,
2404 },
2405 .dev = &memory,
2406 .fifo_size = ISO_FIFO_SIZE,
2407 .bEndpointAddress = USB_DIR_IN | 13,
2408 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2409 .reg_udccs = &UDCCS13,
2410 .reg_uddr = &UDDR13,
2411 drcmr (37)
2412 },
2413 .ep[14] = {
2414 .ep = {
2415 .name = "ep14out-iso",
2416 .ops = &pxa2xx_ep_ops,
2417 .maxpacket = ISO_FIFO_SIZE,
2418 },
2419 .dev = &memory,
2420 .fifo_size = ISO_FIFO_SIZE,
2421 .bEndpointAddress = 14,
2422 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2423 .reg_udccs = &UDCCS14,
2424 .reg_ubcr = &UBCR14,
2425 .reg_uddr = &UDDR14,
2426 drcmr (38)
2427 },
2428 .ep[15] = {
2429 .ep = {
2430 .name = "ep15in-int",
2431 .ops = &pxa2xx_ep_ops,
2432 .maxpacket = INT_FIFO_SIZE,
2433 },
2434 .dev = &memory,
2435 .fifo_size = INT_FIFO_SIZE,
2436 .bEndpointAddress = USB_DIR_IN | 15,
2437 .bmAttributes = USB_ENDPOINT_XFER_INT,
2438 .reg_udccs = &UDCCS15,
2439 .reg_uddr = &UDDR15,
2440 },
2441#endif /* !CONFIG_USB_PXA2XX_SMALL */
2442};
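/* Gadget drivers normally pick these endpoints by walking
 * gadget->ep_list (or via the usb_ep_autoconfig() helper), matching
 * the "ep<N><dir>-<type>" names above; with CONFIG_USB_PXA2XX_SMALL
 * only ep0 and the ep1/ep2 bulk pair are populated.
 */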
2443
2444#define CP15R0_VENDOR_MASK 0xffffe000
2445
2446#if defined(CONFIG_ARCH_PXA)
2447#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
2448
2449#elif defined(CONFIG_ARCH_IXP4XX)
2450#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
2451
2452#endif
2453
2454#define CP15R0_PROD_MASK 0x000003f0
2455#define PXA25x 0x00000100 /* and PXA26x */
2456#define PXA210 0x00000120
2457
2458#define CP15R0_REV_MASK 0x0000000f
2459
2460#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
2461
2462#define PXA255_A0 0x00000106 /* or PXA260_B1 */
2463#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
2464#define PXA250_B2 0x00000104
2465#define PXA250_B1 0x00000103 /* or PXA260_A0 */
2466#define PXA250_B0 0x00000102
2467#define PXA250_A1 0x00000101
2468#define PXA250_A0 0x00000100
2469
2470#define PXA210_C0 0x00000125
2471#define PXA210_B2 0x00000124
2472#define PXA210_B1 0x00000123
2473#define PXA210_B0 0x00000122
2474#define IXP425_A0 0x000001c1
 2475#define IXP465_AD	0x00000200
2476
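/* Worked example of how the masks above decode the CP15 ID register
 * read in pxa2xx_udc_probe(): on a PXA255 A0, (chiprev & CP15R0_VENDOR_MASK)
 * equals CP15R0_XSCALE_VALUE, and (chiprev & CP15R0_PRODREV_MASK) equals
 * PXA255_A0 (0x106), i.e. product bits 0x100 (PXA25x family) plus
 * revision 6.
 */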
2477/*
2478 * probe - binds to the platform device
2479 */
 2480static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2481{
2482 struct pxa2xx_udc *dev = &memory;
 2483	int retval, out_dma = 1, vbus_irq;
2484 u32 chiprev;
2485
2486 /* insist on Intel/ARM/XScale */
2487 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
2488 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2489 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2490 return -ENODEV;
2491 }
2492
2493 /* trigger chiprev-specific logic */
2494 switch (chiprev & CP15R0_PRODREV_MASK) {
2495#if defined(CONFIG_ARCH_PXA)
2496 case PXA255_A0:
2497 dev->has_cfr = 1;
2498 break;
2499 case PXA250_A0:
2500 case PXA250_A1:
2501 /* A0/A1 "not released"; ep 13, 15 unusable */
2502 /* fall through */
2503 case PXA250_B2: case PXA210_B2:
2504 case PXA250_B1: case PXA210_B1:
2505 case PXA250_B0: case PXA210_B0:
2506 out_dma = 0;
2507 /* fall through */
2508 case PXA250_C0: case PXA210_C0:
2509 break;
2510#elif defined(CONFIG_ARCH_IXP4XX)
2511 case IXP425_A0:
2512 case IXP465_AD:
2513 dev->has_cfr = 1;
2514 out_dma = 0;
2515 break;
2516#endif
2517 default:
2518 out_dma = 0;
2519 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2520 driver_name, chiprev);
2521 /* iop3xx, ixp4xx, ... */
2522 return -ENODEV;
2523 }
2524
2525 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
2526 dev->has_cfr ? "" : " (!cfr)",
2527 out_dma ? "" : " (broken dma-out)",
2528 SIZE_STR DMASTR
2529 );
2530
2531#ifdef USE_DMA
2532#ifndef USE_OUT_DMA
2533 out_dma = 0;
2534#endif
2535 /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
2536 if (!out_dma) {
2537 DMSG("disabled OUT dma\n");
2538 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2539 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2540 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2541 }
2542#endif
2543
2544 /* other non-static parts of init */
2545 dev->dev = &pdev->dev;
2546 dev->mach = pdev->dev.platform_data;
2547 if (dev->mach->gpio_vbus) {
2548 vbus_irq = IRQ_GPIO(dev->mach->gpio_vbus & GPIO_MD_MASK_NR);
2549 pxa_gpio_mode((dev->mach->gpio_vbus & GPIO_MD_MASK_NR)
2550 | GPIO_IN);
2551 set_irq_type(vbus_irq, IRQT_BOTHEDGE);
2552 } else
2553 vbus_irq = 0;
2554 if (dev->mach->gpio_pullup)
2555 pxa_gpio_mode((dev->mach->gpio_pullup & GPIO_MD_MASK_NR)
2556 | GPIO_OUT | GPIO_DFLT_LOW);
2557
2558 init_timer(&dev->timer);
2559 dev->timer.function = udc_watchdog;
2560 dev->timer.data = (unsigned long) dev;
2561
2562 device_initialize(&dev->gadget.dev);
2563 dev->gadget.dev.parent = &pdev->dev;
2564 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2565
2566 the_controller = dev;
 2567	platform_set_drvdata(pdev, dev);
2568
2569 udc_disable(dev);
2570 udc_reinit(dev);
2571
 2572	dev->vbus = is_vbus_present();
2573
2574 /* irq setup after old hardware state is cleaned up */
2575 retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
 2576			IRQF_DISABLED, driver_name, dev);
2577 if (retval != 0) {
2578 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2579 driver_name, IRQ_USB, retval);
2580 return -EBUSY;
2581 }
2582 dev->got_irq = 1;
2583
2584#ifdef CONFIG_ARCH_LUBBOCK
2585 if (machine_is_lubbock()) {
2586 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2587 lubbock_vbus_irq,
 2588				IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2589 driver_name, dev);
2590 if (retval != 0) {
2591 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2592 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2593lubbock_fail0:
2594 free_irq(IRQ_USB, dev);
2595 return -EBUSY;
2596 }
2597 retval = request_irq(LUBBOCK_USB_IRQ,
2598 lubbock_vbus_irq,
 2599				IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2600 driver_name, dev);
2601 if (retval != 0) {
2602 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2603 driver_name, LUBBOCK_USB_IRQ, retval);
2604 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2605 goto lubbock_fail0;
2606 }
2607#ifdef DEBUG
2608 /* with U-Boot (but not BLOB), hex is off by default */
2609 HEX_DISPLAY(dev->stats.irqs);
2610 LUB_DISC_BLNK_LED &= 0xff;
2611#endif
 2612	} else
 2613#endif
2614 if (vbus_irq) {
2615 retval = request_irq(vbus_irq, udc_vbus_irq,
 2616				IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
2617 driver_name, dev);
2618 if (retval != 0) {
2619 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2620 driver_name, vbus_irq, retval);
2621 free_irq(IRQ_USB, dev);
2622 return -EBUSY;
2623 }
2624 }
2625 create_proc_files();
2626
2627 return 0;
2628}
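/* Board support is expected to register a "pxa2xx-udc" platform device
 * whose platform_data is the pxa2xx_udc_mach_info from <asm/arch/udc.h>
 * consumed above.  A rough sketch (gpio numbers are placeholders, and
 * pxa_set_udc_info() is the usual arch helper for this):
 *
 *	static struct pxa2xx_udc_mach_info my_udc_info = {
 *		.gpio_vbus	= <vbus sense gpio>,
 *		.gpio_pullup	= <D+ pullup gpio>,
 *		// or .udc_command on boards driving the pullup indirectly
 *	};
 *	pxa_set_udc_info(&my_udc_info);
 *
 * Boards without VBUS sensing leave gpio_vbus zero, so probe() skips
 * the extra interrupt.
 */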
 2629
 2630static void pxa2xx_udc_shutdown(struct platform_device *_dev)
2631{
2632 pullup_off();
2633}
2634
 2635static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
 2636{
 2637	struct pxa2xx_udc *dev = platform_get_drvdata(pdev);
2638
2639 udc_disable(dev);
2640 remove_proc_files();
2641 usb_gadget_unregister_driver(dev->driver);
2642
2643 if (dev->got_irq) {
2644 free_irq(IRQ_USB, dev);
2645 dev->got_irq = 0;
2646 }
 2647#ifdef CONFIG_ARCH_LUBBOCK
2648 if (machine_is_lubbock()) {
2649 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2650 free_irq(LUBBOCK_USB_IRQ, dev);
2651 }
 2652#endif
2653 if (dev->mach->gpio_vbus)
 2654		free_irq(IRQ_GPIO(dev->mach->gpio_vbus & GPIO_MD_MASK_NR), dev);
 2655	platform_set_drvdata(pdev, NULL);
2656 the_controller = NULL;
2657 return 0;
2658}
2659
2660/*-------------------------------------------------------------------------*/
2661
2662#ifdef CONFIG_PM
2663
2664/* USB suspend (controlled by the host) and system suspend (controlled
2665 * by the PXA) don't necessarily work well together. If USB is active,
2666 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
2667 * mode, or any deeper PM saving state.
2668 *
2669 * For now, we punt and forcibly disconnect from the USB host when PXA
2670 * enters any suspend state. While we're disconnected, we always disable
2671 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2672 * Boards without software pullup control shouldn't use those states.
2673 * VBUS IRQs should probably be ignored so that the PXA device just acts
2674 * "dead" to USB hosts until system resume.
2675 */
 2676static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
 2677{
 2678	struct pxa2xx_udc *udc = platform_get_drvdata(dev);
 2679
2680 if (!udc->mach->udc_command)
2681 WARN("USB host won't detect disconnect!\n");
2682 pullup(udc, 0);
2683
2684 return 0;
2685}
2686
 2687static int pxa2xx_udc_resume(struct platform_device *dev)
 2688{
 2689	struct pxa2xx_udc *udc = platform_get_drvdata(dev);
 2690
2691 pullup(udc, 1);
2692
2693 return 0;
2694}
2695
2696#else
2697#define pxa2xx_udc_suspend NULL
2698#define pxa2xx_udc_resume NULL
2699#endif
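/* The suspend/resume pair above relies on pullup() dropping and then
 * restoring the D+ pullup; on boards using udc_command that presumably
 * maps to the connect/disconnect commands from <asm/arch/udc.h>, so the
 * host sees a physical disconnect while the PXA sleeps and re-enumerates
 * the device on resume.
 */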
2700
2701/*-------------------------------------------------------------------------*/
2702
 2703static struct platform_driver udc_driver = {
 2704	.probe		= pxa2xx_udc_probe,
 2705	.shutdown	= pxa2xx_udc_shutdown,
2706 .remove = __exit_p(pxa2xx_udc_remove),
2707 .suspend = pxa2xx_udc_suspend,
2708 .resume = pxa2xx_udc_resume,
2709 .driver = {
2710 .owner = THIS_MODULE,
2711 .name = "pxa2xx-udc",
2712 },
2713};
2714
2715static int __init udc_init(void)
2716{
2717 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
 2718	return platform_driver_register(&udc_driver);
2719}
2720module_init(udc_init);
2721
2722static void __exit udc_exit(void)
2723{
 2724	platform_driver_unregister(&udc_driver);
2725}
2726module_exit(udc_exit);
2727
2728MODULE_DESCRIPTION(DRIVER_DESC);
2729MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2730MODULE_LICENSE("GPL");
2731