usb: gadget: mv_udc: fix bug when handling setup packet.
[deliverable/linux.git] / drivers / usb / gadget / mv_udc_core.c
1/*
2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 * Author: Chao Xie <chao.xie@marvell.com>
4 * Neil Zhang <zhangwm@marvell.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
e7cddda4 12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/ioport.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/timer.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/moduleparam.h>
27#include <linux/device.h>
28#include <linux/usb/ch9.h>
29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h>
31#include <linux/pm.h>
32#include <linux/io.h>
33#include <linux/irq.h>
34#include <linux/platform_device.h>
35#include <linux/clk.h>
dde34cc5 36#include <linux/platform_data/mv_usb.h>
e7cddda4 37#include <asm/system.h>
38#include <asm/unaligned.h>
39
40#include "mv_udc.h"
41
42#define DRIVER_DESC "Marvell PXA USB Device Controller driver"
43#define DRIVER_VERSION "8 Nov 2010"
44
45#define ep_dir(ep) (((ep)->ep_num == 0) ? \
46 ((ep)->udc->ep0_dir) : ((ep)->direction))
47
48/* timeout value -- usec */
49#define RESET_TIMEOUT 10000
50#define FLUSH_TIMEOUT 10000
51#define EPSTATUS_TIMEOUT 10000
52#define PRIME_TIMEOUT 10000
53#define READSAFE_TIMEOUT 1000
54#define DTD_TIMEOUT 1000
55
56#define LOOPS_USEC_SHIFT 4
57#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
58#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
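/*
 * Worked example of the polling budget above (illustrative only): with
 * LOOPS_USEC_SHIFT = 4, each poll iteration waits LOOPS_USEC = 16 usec,
 * so LOOPS(RESET_TIMEOUT) = 10000 >> 4 = 625 iterations, which together
 * span roughly the full 10000 usec budget:
 *
 *	loops = LOOPS(RESET_TIMEOUT);     -- 625
 *	while (hw_still_busy()) {         -- hw_still_busy() is a stand-in
 *		if (loops-- == 0)
 *			return -ETIMEDOUT;
 *		udelay(LOOPS_USEC);       -- 16 usec per iteration
 *	}
 */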
59
60static DECLARE_COMPLETION(release_done);
61
e7cddda4 62static const char driver_name[] = "mv_udc";
63static const char driver_desc[] = DRIVER_DESC;
64
65/* controller device global variable */
66static struct mv_udc *the_controller;
67int mv_usb_otgsc;
68
69static void nuke(struct mv_ep *ep, int status);
70
71/* for endpoint 0 operations */
72static const struct usb_endpoint_descriptor mv_ep0_desc = {
73 .bLength = USB_DT_ENDPOINT_SIZE,
74 .bDescriptorType = USB_DT_ENDPOINT,
75 .bEndpointAddress = 0,
76 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
77 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
78};
79
80static void ep0_reset(struct mv_udc *udc)
81{
82 struct mv_ep *ep;
83 u32 epctrlx;
84 int i = 0;
85
86 /* ep0 in and out */
87 for (i = 0; i < 2; i++) {
88 ep = &udc->eps[i];
89 ep->udc = udc;
90
91 /* ep0 dQH */
92 ep->dqh = &udc->ep_dqh[i];
93
94 /* configure ep0 endpoint capabilities in dQH */
95 ep->dqh->max_packet_length =
96 (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
97 | EP_QUEUE_HEAD_IOS;
98
99 ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
100
e7cddda4 101 epctrlx = readl(&udc->op_regs->epctrlx[0]);
102 if (i) { /* TX */
43ad9f3f 103 epctrlx |= EPCTRL_TX_ENABLE
e7cddda4 104 | (USB_ENDPOINT_XFER_CONTROL
105 << EPCTRL_TX_EP_TYPE_SHIFT);
106
107 } else { /* RX */
43ad9f3f 108 epctrlx |= EPCTRL_RX_ENABLE
e7cddda4 109 | (USB_ENDPOINT_XFER_CONTROL
110 << EPCTRL_RX_EP_TYPE_SHIFT);
111 }
112
113 writel(epctrlx, &udc->op_regs->epctrlx[0]);
114 }
115}
116
117/* protocol ep0 stall, will automatically be cleared on new transaction */
118static void ep0_stall(struct mv_udc *udc)
119{
120 u32 epctrlx;
121
122 /* set TX and RX to stall */
123 epctrlx = readl(&udc->op_regs->epctrlx[0]);
124 epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
125 writel(epctrlx, &udc->op_regs->epctrlx[0]);
126
127 /* update ep0 state */
128 udc->ep0_state = WAIT_FOR_SETUP;
129 udc->ep0_dir = EP_DIR_OUT;
130}
131
132static int process_ep_req(struct mv_udc *udc, int index,
133 struct mv_req *curr_req)
134{
135 struct mv_dtd *curr_dtd;
136 struct mv_dqh *curr_dqh;
 137 int td_complete, actual, remaining_length = 0;
138 int i, direction;
139 int retval = 0;
140 u32 errors;
141
142 curr_dqh = &udc->ep_dqh[index];
143 direction = index % 2;
144
145 curr_dtd = curr_req->head;
146 td_complete = 0;
147 actual = curr_req->req.length;
148
149 for (i = 0; i < curr_req->dtd_count; i++) {
150 if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
151 dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
152 udc->eps[index].name);
153 return 1;
154 }
155
156 errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
157 if (!errors) {
158 remaining_length +=
159 (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
160 >> DTD_LENGTH_BIT_POS;
161 actual -= remaining_length;
162 } else {
163 dev_info(&udc->dev->dev,
164 "complete_tr error: ep=%d %s: error = 0x%x\n",
165 index >> 1, direction ? "SEND" : "RECV",
166 errors);
167 if (errors & DTD_STATUS_HALTED) {
168 /* Clear the errors and Halt condition */
169 curr_dqh->size_ioc_int_sts &= ~errors;
170 retval = -EPIPE;
171 } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
172 retval = -EPROTO;
173 } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
174 retval = -EILSEQ;
175 }
176 }
177 if (i != curr_req->dtd_count - 1)
178 curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
179 }
180 if (retval)
181 return retval;
182
183 curr_req->req.actual = actual;
184
185 return 0;
186}
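/*
 * Worked example for the completion math above (numbers hypothetical):
 * a 512-byte OUT request queued as a single dTD, of which the host
 * sends only 200 bytes. On completion the dTD's size field reads back
 * 312 (bytes left untransferred), so remaining_length = 312 and
 * curr_req->req.actual = 512 - 312 = 200.
 */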
187
 188/*
 189 * done() - retire a request; caller must hold udc->lock with irqs blocked
 190 * @status : request status to be set, only takes effect when the
 191 * request is still in progress.
 192 */
193static void done(struct mv_ep *ep, struct mv_req *req, int status)
194{
195 struct mv_udc *udc = NULL;
196 unsigned char stopped = ep->stopped;
197 struct mv_dtd *curr_td, *next_td;
198 int j;
199
200 udc = (struct mv_udc *)ep->udc;
 201 /* Remove the req from ep->queue */
202 list_del_init(&req->queue);
203
204 /* req.status should be set as -EINPROGRESS in ep_queue() */
205 if (req->req.status == -EINPROGRESS)
206 req->req.status = status;
207 else
208 status = req->req.status;
209
210 /* Free dtd for the request */
211 next_td = req->head;
212 for (j = 0; j < req->dtd_count; j++) {
213 curr_td = next_td;
214 if (j != req->dtd_count - 1)
215 next_td = curr_td->next_dtd_virt;
216 dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
217 }
218
219 if (req->mapped) {
220 dma_unmap_single(ep->udc->gadget.dev.parent,
221 req->req.dma, req->req.length,
222 ((ep_dir(ep) == EP_DIR_IN) ?
223 DMA_TO_DEVICE : DMA_FROM_DEVICE));
224 req->req.dma = DMA_ADDR_INVALID;
225 req->mapped = 0;
226 } else
227 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
228 req->req.dma, req->req.length,
229 ((ep_dir(ep) == EP_DIR_IN) ?
230 DMA_TO_DEVICE : DMA_FROM_DEVICE));
231
232 if (status && (status != -ESHUTDOWN))
233 dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
234 ep->ep.name, &req->req, status,
235 req->req.actual, req->req.length);
236
237 ep->stopped = 1;
238
239 spin_unlock(&ep->udc->lock);
240 /*
241 * complete() is from gadget layer,
242 * eg fsg->bulk_in_complete()
243 */
244 if (req->req.complete)
245 req->req.complete(&ep->ep, &req->req);
246
247 spin_lock(&ep->udc->lock);
248 ep->stopped = stopped;
249}
250
251static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
252{
253 u32 tmp, epstatus, bit_pos, direction;
254 struct mv_udc *udc;
255 struct mv_dqh *dqh;
256 unsigned int loops;
257 int readsafe, retval = 0;
258
259 udc = ep->udc;
260 direction = ep_dir(ep);
261 dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
262 bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
263
264 /* check if the pipe is empty */
265 if (!(list_empty(&ep->queue))) {
266 struct mv_req *lastreq;
267 lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
268 lastreq->tail->dtd_next =
269 req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
270 if (readl(&udc->op_regs->epprime) & bit_pos) {
271 loops = LOOPS(PRIME_TIMEOUT);
272 while (readl(&udc->op_regs->epprime) & bit_pos) {
273 if (loops == 0) {
274 retval = -ETIME;
275 goto done;
276 }
277 udelay(LOOPS_USEC);
278 loops--;
279 }
280 if (readl(&udc->op_regs->epstatus) & bit_pos)
281 goto done;
282 }
283 readsafe = 0;
284 loops = LOOPS(READSAFE_TIMEOUT);
285 while (readsafe == 0) {
286 if (loops == 0) {
287 retval = -ETIME;
288 goto done;
289 }
290 /* start with setting the semaphores */
291 tmp = readl(&udc->op_regs->usbcmd);
292 tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
293 writel(tmp, &udc->op_regs->usbcmd);
294
295 /* read the endpoint status */
296 epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
297
 298 /*
 299 * Reread the ATDTW semaphore bit to check if it is
 300 * cleared. When the hardware sees a hazard, it clears
 301 * the bit; otherwise it stays set to 1 and we can
 302 * proceed with priming the endpoint if it is not
 303 * already primed.
 304 */
305 if (readl(&udc->op_regs->usbcmd)
306 & USBCMD_ATDTW_TRIPWIRE_SET) {
307 readsafe = 1;
308 }
309 loops--;
310 udelay(LOOPS_USEC);
311 }
312
313 /* Clear the semaphore */
314 tmp = readl(&udc->op_regs->usbcmd);
315 tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
316 writel(tmp, &udc->op_regs->usbcmd);
317
318 /* If endpoint is not active, we activate it now. */
319 if (!epstatus) {
320 if (direction == EP_DIR_IN) {
321 struct mv_dtd *curr_dtd = dma_to_virt(
322 &udc->dev->dev, dqh->curr_dtd_ptr);
323
324 loops = LOOPS(DTD_TIMEOUT);
325 while (curr_dtd->size_ioc_sts
326 & DTD_STATUS_ACTIVE) {
327 if (loops == 0) {
328 retval = -ETIME;
329 goto done;
330 }
331 loops--;
332 udelay(LOOPS_USEC);
333 }
334 }
335 /* No other transfers on the queue */
336
337 /* Write dQH next pointer and terminate bit to 0 */
338 dqh->next_dtd_ptr = req->head->td_dma
339 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
340 dqh->size_ioc_int_sts = 0;
341
342 /*
343 * Ensure that updates to the QH will
25985edc 344 * occur before priming.
e7cddda4 345 */
346 wmb();
347
348 /* Prime the Endpoint */
349 writel(bit_pos, &udc->op_regs->epprime);
350 }
351 } else {
352 /* Write dQH next pointer and terminate bit to 0 */
353 dqh->next_dtd_ptr = req->head->td_dma
 354 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
355 dqh->size_ioc_int_sts = 0;
356
25985edc 357 /* Ensure that updates to the QH will occur before priming. */
e7cddda4 358 wmb();
359
360 /* Prime the Endpoint */
361 writel(bit_pos, &udc->op_regs->epprime);
362
363 if (direction == EP_DIR_IN) {
364 /* FIXME add status check after prime the IN ep */
365 int prime_again;
366 u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
367
368 loops = LOOPS(DTD_TIMEOUT);
369 prime_again = 0;
370 while ((curr_dtd_ptr != req->head->td_dma)) {
371 curr_dtd_ptr = dqh->curr_dtd_ptr;
372 if (loops == 0) {
373 dev_err(&udc->dev->dev,
374 "failed to prime %s\n",
375 ep->name);
376 retval = -ETIME;
377 goto done;
378 }
379 loops--;
380 udelay(LOOPS_USEC);
381
382 if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
383 if (prime_again)
384 goto done;
385 dev_info(&udc->dev->dev,
386 "prime again\n");
387 writel(bit_pos,
388 &udc->op_regs->epprime);
389 prime_again = 1;
390 }
391 }
392 }
393 }
394done:
 395 return retval;
396}
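/*
 * The ATDTW handshake above is the ChipIdea/ARC "add dTD tripwire"
 * pattern. A minimal sketch of the protocol (register names abbreviated,
 * not the driver's accessors):
 *
 *	do {
 *		USBCMD |= ATDTW;                  -- arm the tripwire
 *		status = ENDPTSTATUS & bit_pos;   -- sample while armed
 *	} while (!(USBCMD & ATDTW));              -- HW saw a hazard: resample
 *	USBCMD &= ~ATDTW;                         -- disarm
 *	if (!status)
 *		ENDPTPRIME = bit_pos;             -- endpoint idle: re-prime
 *
 * The sampled endpoint status is only trusted if the tripwire survived,
 * i.e. the controller did not fetch the dQH while the new dTD was being
 * linked onto the tail of the queue.
 */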
397
398static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
399 dma_addr_t *dma, int *is_last)
400{
401 u32 temp;
402 struct mv_dtd *dtd;
403 struct mv_udc *udc;
404
405 /* how big will this transfer be? */
406 *length = min(req->req.length - req->req.actual,
407 (unsigned)EP_MAX_LENGTH_TRANSFER);
408
409 udc = req->ep->udc;
410
411 /*
 412 * Be careful that __GFP_HIGHMEM is not set,
 413 * or we cannot use dma_to_virt()
414 */
415 dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
416 if (dtd == NULL)
417 return dtd;
418
419 dtd->td_dma = *dma;
420 /* initialize buffer page pointers */
421 temp = (u32)(req->req.dma + req->req.actual);
422 dtd->buff_ptr0 = cpu_to_le32(temp);
423 temp &= ~0xFFF;
424 dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
425 dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
426 dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
427 dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
428
429 req->req.actual += *length;
430
 431 /* a zero-length packet (zlp) is needed if req->req.zero is set */
432 if (req->req.zero) {
433 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
434 *is_last = 1;
435 else
436 *is_last = 0;
437 } else if (req->req.length == req->req.actual)
438 *is_last = 1;
439 else
440 *is_last = 0;
441
442 /* Fill in the transfer size; set active bit */
443 temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
444
445 /* Enable interrupt for the last dtd of a request */
446 if (*is_last && !req->req.no_interrupt)
447 temp |= DTD_IOC;
448
449 dtd->size_ioc_sts = temp;
450
451 mb();
452
453 return dtd;
454}
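/*
 * Example of the buffer-pointer setup above (addresses made up): a dTD
 * addresses at most five 4 KiB pages, so with req.dma + req.actual =
 * 0x12345678 the code programs
 *
 *	buff_ptr0 = 0x12345678   -- page 0, including the byte offset
 *	buff_ptr1 = 0x12346000   -- four following page-aligned pages
 *	buff_ptr2 = 0x12347000
 *	buff_ptr3 = 0x12348000
 *	buff_ptr4 = 0x12349000
 *
 * Each dTD carries at most EP_MAX_LENGTH_TRANSFER bytes, so larger
 * requests are split across several dTDs by req_to_dtd() below.
 */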
455
456/* generate dTD linked list for a request */
457static int req_to_dtd(struct mv_req *req)
458{
459 unsigned count;
460 int is_last, is_first = 1;
461 struct mv_dtd *dtd, *last_dtd = NULL;
462 struct mv_udc *udc;
463 dma_addr_t dma;
464
465 udc = req->ep->udc;
466
467 do {
468 dtd = build_dtd(req, &count, &dma, &is_last);
469 if (dtd == NULL)
470 return -ENOMEM;
471
472 if (is_first) {
473 is_first = 0;
474 req->head = dtd;
475 } else {
476 last_dtd->dtd_next = dma;
477 last_dtd->next_dtd_virt = dtd;
478 }
479 last_dtd = dtd;
480 req->dtd_count++;
481 } while (!is_last);
482
483 /* set terminate bit to 1 for the last dTD */
484 dtd->dtd_next = DTD_NEXT_TERMINATE;
485
486 req->tail = dtd;
487
488 return 0;
489}
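/*
 * Example chain lengths (assuming EP_MAX_LENGTH_TRANSFER is 16 KiB, as
 * on this controller family): a 40960-byte request becomes three dTDs
 * of 16K + 16K + 8K. If req.zero is set and the length is an exact
 * multiple of maxpacket, build_dtd() is called once more with
 * *length == 0 and appends a trailing zero-length dTD, so the host sees
 * a short packet terminating the transfer.
 */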
490
491static int mv_ep_enable(struct usb_ep *_ep,
492 const struct usb_endpoint_descriptor *desc)
493{
494 struct mv_udc *udc;
495 struct mv_ep *ep;
496 struct mv_dqh *dqh;
497 u16 max = 0;
498 u32 bit_pos, epctrlx, direction;
499 unsigned char zlt = 0, ios = 0, mult = 0;
500
 501 ep = container_of(_ep, struct mv_ep, ep);
 502
 503 if (!_ep || !desc || ep->desc
 504 || desc->bDescriptorType != USB_DT_ENDPOINT)
 505 return -EINVAL;
 506 udc = ep->udc;
507
508 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
509 return -ESHUTDOWN;
510
511 direction = ep_dir(ep);
29cc8897 512 max = usb_endpoint_maxp(desc);
e7cddda4 513
514 /*
515 * disable HW zero length termination select
516 * driver handles zero length packet through req->req.zero
517 */
518 zlt = 1;
519
520 /* Get the endpoint queue head address */
521 dqh = (struct mv_dqh *)ep->dqh;
522
523 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
524
525 /* Check if the Endpoint is Primed */
526 if ((readl(&udc->op_regs->epprime) & bit_pos)
527 || (readl(&udc->op_regs->epstatus) & bit_pos)) {
528 dev_info(&udc->dev->dev,
529 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
530 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
531 (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
532 (unsigned)readl(&udc->op_regs->epprime),
533 (unsigned)readl(&udc->op_regs->epstatus),
534 (unsigned)bit_pos);
535 goto en_done;
536 }
537 /* Set the max packet length, interrupt on Setup and Mult fields */
538 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
539 case USB_ENDPOINT_XFER_BULK:
540 zlt = 1;
541 mult = 0;
542 break;
543 case USB_ENDPOINT_XFER_CONTROL:
 544 ios = 1; /* fall through */
545 case USB_ENDPOINT_XFER_INT:
546 mult = 0;
547 break;
548 case USB_ENDPOINT_XFER_ISOC:
549 /* Calculate transactions needed for high bandwidth iso */
550 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
bedcff97 551 max = max & 0x7ff; /* bits 0-10 */
e7cddda4 552 /* 3 transactions at most */
553 if (mult > 3)
554 goto en_done;
555 break;
556 default:
557 goto en_done;
558 }
559 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
560 | (mult << EP_QUEUE_HEAD_MULT_POS)
561 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
562 | (ios ? EP_QUEUE_HEAD_IOS : 0);
 563 dqh->next_dtd_ptr = 1; /* set terminate bit */
564 dqh->size_ioc_int_sts = 0;
565
566 ep->ep.maxpacket = max;
567 ep->desc = desc;
568 ep->stopped = 0;
569
570 /* Enable the endpoint for Rx or Tx and set the endpoint type */
571 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
572 if (direction == EP_DIR_IN) {
573 epctrlx &= ~EPCTRL_TX_ALL_MASK;
574 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
575 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
576 << EPCTRL_TX_EP_TYPE_SHIFT);
577 } else {
578 epctrlx &= ~EPCTRL_RX_ALL_MASK;
579 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
580 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
581 << EPCTRL_RX_EP_TYPE_SHIFT);
582 }
583 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
584
585 /*
586 * Implement Guideline (GL# USB-7) The unused endpoint type must
587 * be programmed to bulk.
588 */
589 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
590 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
591 epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
592 << EPCTRL_RX_EP_TYPE_SHIFT);
593 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
594 }
595
596 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
597 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
598 epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
599 << EPCTRL_TX_EP_TYPE_SHIFT);
600 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
601 }
602
603 return 0;
604en_done:
605 return -EINVAL;
606}
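/*
 * Example of the high-bandwidth iso math above (descriptor value
 * hypothetical): wMaxPacketSize = 0x1400 gives max = 0x1400 & 0x7ff =
 * 1024 bytes and mult = 1 + ((0x1400 >> 11) & 0x03) = 3, so the dQH is
 * programmed for up to three 1024-byte transactions per microframe.
 */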
607
608static int mv_ep_disable(struct usb_ep *_ep)
609{
610 struct mv_udc *udc;
611 struct mv_ep *ep;
612 struct mv_dqh *dqh;
613 u32 bit_pos, epctrlx, direction;
614
615 ep = container_of(_ep, struct mv_ep, ep);
616 if ((_ep == NULL) || !ep->desc)
617 return -EINVAL;
618
619 udc = ep->udc;
620
621 /* Get the endpoint queue head address */
622 dqh = ep->dqh;
623
624 direction = ep_dir(ep);
625 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
626
627 /* Reset the max packet length and the interrupt on Setup */
628 dqh->max_packet_length = 0;
629
630 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
631 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
632 epctrlx &= ~((direction == EP_DIR_IN)
633 ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
634 : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
635 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
636
637 /* nuke all pending requests (does flush) */
638 nuke(ep, -ESHUTDOWN);
639
640 ep->desc = NULL;
641 ep->stopped = 1;
642 return 0;
643}
644
645static struct usb_request *
646mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
647{
648 struct mv_req *req = NULL;
649
650 req = kzalloc(sizeof *req, gfp_flags);
651 if (!req)
652 return NULL;
653
654 req->req.dma = DMA_ADDR_INVALID;
655 INIT_LIST_HEAD(&req->queue);
656
657 return &req->req;
658}
659
660static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
661{
662 struct mv_req *req = NULL;
663
664 req = container_of(_req, struct mv_req, req);
665
666 if (_req)
667 kfree(req);
668}
669
670static void mv_ep_fifo_flush(struct usb_ep *_ep)
671{
672 struct mv_udc *udc;
673 u32 bit_pos, direction;
674 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
675 unsigned int loops;
676
677 udc = ep->udc;
678 direction = ep_dir(ep);
679 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
680 /*
681 * Flushing will halt the pipe
682 * Write 1 to the Flush register
683 */
684 writel(bit_pos, &udc->op_regs->epflush);
685
686 /* Wait until flushing completed */
687 loops = LOOPS(FLUSH_TIMEOUT);
688 while (readl(&udc->op_regs->epflush) & bit_pos) {
689 /*
690 * ENDPTFLUSH bit should be cleared to indicate this
691 * operation is complete
692 */
693 if (loops == 0) {
694 dev_err(&udc->dev->dev,
695 "TIMEOUT for ENDPTFLUSH=0x%x, bit_pos=0x%x\n",
696 (unsigned)readl(&udc->op_regs->epflush),
697 (unsigned)bit_pos);
698 return;
699 }
700 loops--;
701 udelay(LOOPS_USEC);
702 }
703 loops = LOOPS(EPSTATUS_TIMEOUT);
704 while (readl(&udc->op_regs->epstatus) & bit_pos) {
705 unsigned int inter_loops;
706
707 if (loops == 0) {
708 dev_err(&udc->dev->dev,
709 "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
710 (unsigned)readl(&udc->op_regs->epstatus),
711 (unsigned)bit_pos);
712 return;
713 }
714 /* Write 1 to the Flush register */
715 writel(bit_pos, &udc->op_regs->epflush);
716
717 /* Wait until flushing completed */
718 inter_loops = LOOPS(FLUSH_TIMEOUT);
719 while (readl(&udc->op_regs->epflush) & bit_pos) {
720 /*
721 * ENDPTFLUSH bit should be cleared to indicate this
722 * operation is complete
723 */
724 if (inter_loops == 0) {
725 dev_err(&udc->dev->dev,
726 "TIMEOUT for ENDPTFLUSH=0x%x,"
727 "bit_pos=0x%x\n",
728 (unsigned)readl(&udc->op_regs->epflush),
729 (unsigned)bit_pos);
730 return;
731 }
732 inter_loops--;
733 udelay(LOOPS_USEC);
734 }
735 loops--;
736 }
737}
738
739/* queues (submits) an I/O request to an endpoint */
740static int
741mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
742{
743 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
744 struct mv_req *req = container_of(_req, struct mv_req, req);
745 struct mv_udc *udc = ep->udc;
746 unsigned long flags;
747
748 /* catch various bogus parameters */
749 if (!_req || !req->req.complete || !req->req.buf
750 || !list_empty(&req->queue)) {
751 dev_err(&udc->dev->dev, "%s, bad params", __func__);
752 return -EINVAL;
753 }
754 if (unlikely(!_ep || !ep->desc)) {
755 dev_err(&udc->dev->dev, "%s, bad ep", __func__);
756 return -EINVAL;
757 }
 758 if (usb_endpoint_xfer_isoc(ep->desc)) {
759 if (req->req.length > ep->ep.maxpacket)
760 return -EMSGSIZE;
761 }
762
763 udc = ep->udc;
764 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
765 return -ESHUTDOWN;
766
767 req->ep = ep;
768
769 /* map virtual address to hardware */
770 if (req->req.dma == DMA_ADDR_INVALID) {
771 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
772 req->req.buf,
773 req->req.length, ep_dir(ep)
774 ? DMA_TO_DEVICE
775 : DMA_FROM_DEVICE);
776 req->mapped = 1;
777 } else {
778 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
779 req->req.dma, req->req.length,
780 ep_dir(ep)
781 ? DMA_TO_DEVICE
782 : DMA_FROM_DEVICE);
783 req->mapped = 0;
784 }
785
786 req->req.status = -EINPROGRESS;
787 req->req.actual = 0;
788 req->dtd_count = 0;
789
790 spin_lock_irqsave(&udc->lock, flags);
791
792 /* build dtds and push them to device queue */
793 if (!req_to_dtd(req)) {
794 int retval;
795 retval = queue_dtd(ep, req);
796 if (retval) {
797 spin_unlock_irqrestore(&udc->lock, flags);
798 return retval;
799 }
800 } else {
801 spin_unlock_irqrestore(&udc->lock, flags);
802 return -ENOMEM;
803 }
804
805 /* Update ep0 state */
806 if (ep->ep_num == 0)
807 udc->ep0_state = DATA_STATE_XMIT;
808
809 /* irq handler advances the queue */
810 if (req != NULL)
811 list_add_tail(&req->queue, &ep->queue);
812 spin_unlock_irqrestore(&udc->lock, flags);
813
814 return 0;
815}
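/*
 * A minimal gadget-side usage sketch for the queue path above (standard
 * gadget API of this era; my_complete and buf/len are hypothetical):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;               -- kmalloc'd, DMA-able buffer
 *	req->length = len;
 *	req->complete = my_complete;  -- called without udc->lock held
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() lands in mv_ep_queue(), which maps the buffer, builds
 * the dTD chain with req_to_dtd() and primes the endpoint via
 * queue_dtd().
 */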
816
817/* dequeues (cancels, unlinks) an I/O request from an endpoint */
818static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
819{
820 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
821 struct mv_req *req;
822 struct mv_udc *udc = ep->udc;
823 unsigned long flags;
824 int stopped, ret = 0;
825 u32 epctrlx;
826
827 if (!_ep || !_req)
828 return -EINVAL;
829
830 spin_lock_irqsave(&ep->udc->lock, flags);
831 stopped = ep->stopped;
832
833 /* Stop the ep before we deal with the queue */
834 ep->stopped = 1;
835 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
836 if (ep_dir(ep) == EP_DIR_IN)
837 epctrlx &= ~EPCTRL_TX_ENABLE;
838 else
839 epctrlx &= ~EPCTRL_RX_ENABLE;
840 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
841
842 /* make sure it's actually queued on this endpoint */
843 list_for_each_entry(req, &ep->queue, queue) {
844 if (&req->req == _req)
845 break;
846 }
847 if (&req->req != _req) {
848 ret = -EINVAL;
849 goto out;
850 }
851
852 /* The request is in progress, or completed but not dequeued */
853 if (ep->queue.next == &req->queue) {
854 _req->status = -ECONNRESET;
855 mv_ep_fifo_flush(_ep); /* flush current transfer */
856
857 /* The request isn't the last request in this ep queue */
858 if (req->queue.next != &ep->queue) {
859 struct mv_dqh *qh;
860 struct mv_req *next_req;
861
862 qh = ep->dqh;
863 next_req = list_entry(req->queue.next, struct mv_req,
864 queue);
865
866 /* Point the QH to the first TD of next request */
 867 writel((u32) next_req->head->td_dma, &qh->curr_dtd_ptr);
868 } else {
869 struct mv_dqh *qh;
870
871 qh = ep->dqh;
872 qh->next_dtd_ptr = 1;
873 qh->size_ioc_int_sts = 0;
874 }
875
876 /* The request hasn't been processed, patch up the TD chain */
877 } else {
878 struct mv_req *prev_req;
879
880 prev_req = list_entry(req->queue.prev, struct mv_req, queue);
881 writel(readl(&req->tail->dtd_next),
882 &prev_req->tail->dtd_next);
883
884 }
885
886 done(ep, req, -ECONNRESET);
887
888 /* Enable EP */
889out:
890 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
891 if (ep_dir(ep) == EP_DIR_IN)
892 epctrlx |= EPCTRL_TX_ENABLE;
893 else
894 epctrlx |= EPCTRL_RX_ENABLE;
895 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
896 ep->stopped = stopped;
897
898 spin_unlock_irqrestore(&ep->udc->lock, flags);
899 return ret;
900}
901
902static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
903{
904 u32 epctrlx;
905
906 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
907
908 if (stall) {
909 if (direction == EP_DIR_IN)
910 epctrlx |= EPCTRL_TX_EP_STALL;
911 else
912 epctrlx |= EPCTRL_RX_EP_STALL;
913 } else {
914 if (direction == EP_DIR_IN) {
915 epctrlx &= ~EPCTRL_TX_EP_STALL;
916 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
917 } else {
918 epctrlx &= ~EPCTRL_RX_EP_STALL;
919 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
920 }
921 }
922 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
923}
924
925static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
926{
927 u32 epctrlx;
928
929 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
930
931 if (direction == EP_DIR_OUT)
932 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
933 else
934 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
935}
936
937static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
938{
939 struct mv_ep *ep;
940 unsigned long flags = 0;
941 int status = 0;
942 struct mv_udc *udc;
943
944 ep = container_of(_ep, struct mv_ep, ep);
945 udc = ep->udc;
946 if (!_ep || !ep->desc) {
947 status = -EINVAL;
948 goto out;
949 }
950
 951 if (usb_endpoint_xfer_isoc(ep->desc)) {
952 status = -EOPNOTSUPP;
953 goto out;
954 }
955
956 /*
 957 * An attempt to halt an IN ep will fail if any transfer
 958 * requests are still queued
959 */
960 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
961 status = -EAGAIN;
962 goto out;
963 }
964
965 spin_lock_irqsave(&ep->udc->lock, flags);
966 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
967 if (halt && wedge)
968 ep->wedge = 1;
969 else if (!halt)
970 ep->wedge = 0;
971 spin_unlock_irqrestore(&ep->udc->lock, flags);
972
973 if (ep->ep_num == 0) {
974 udc->ep0_state = WAIT_FOR_SETUP;
975 udc->ep0_dir = EP_DIR_OUT;
976 }
977out:
978 return status;
979}
980
981static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
982{
983 return mv_ep_set_halt_wedge(_ep, halt, 0);
984}
985
986static int mv_ep_set_wedge(struct usb_ep *_ep)
987{
988 return mv_ep_set_halt_wedge(_ep, 1, 1);
989}
990
991static struct usb_ep_ops mv_ep_ops = {
992 .enable = mv_ep_enable,
993 .disable = mv_ep_disable,
994
995 .alloc_request = mv_alloc_request,
996 .free_request = mv_free_request,
997
998 .queue = mv_ep_queue,
999 .dequeue = mv_ep_dequeue,
1000
1001 .set_wedge = mv_ep_set_wedge,
1002 .set_halt = mv_ep_set_halt,
1003 .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
1004};
1005
1006static void udc_clock_enable(struct mv_udc *udc)
1007{
1008 unsigned int i;
1009
1010 for (i = 0; i < udc->clknum; i++)
1011 clk_enable(udc->clk[i]);
1012}
1013
1014static void udc_clock_disable(struct mv_udc *udc)
1015{
1016 unsigned int i;
1017
1018 for (i = 0; i < udc->clknum; i++)
1019 clk_disable(udc->clk[i]);
1020}
1021
e7cddda4 1022static void udc_stop(struct mv_udc *udc)
1023{
1024 u32 tmp;
1025
1026 /* Disable interrupts */
1027 tmp = readl(&udc->op_regs->usbintr);
1028 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
1029 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
1030 writel(tmp, &udc->op_regs->usbintr);
1031
 1032 /* Clear the Run/Stop bit in the command register to stop the controller */
1033 tmp = readl(&udc->op_regs->usbcmd);
1034 tmp &= ~USBCMD_RUN_STOP;
1035 writel(tmp, &udc->op_regs->usbcmd);
1036}
1037
1038static void udc_start(struct mv_udc *udc)
1039{
1040 u32 usbintr;
1041
1042 usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1043 | USBINTR_PORT_CHANGE_DETECT_EN
1044 | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1045 /* Enable interrupts */
1046 writel(usbintr, &udc->op_regs->usbintr);
1047
1048 /* Set the Run bit in the command register */
1049 writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1050}
1051
1052static int udc_reset(struct mv_udc *udc)
1053{
1054 unsigned int loops;
1055 u32 tmp, portsc;
1056
1057 /* Stop the controller */
1058 tmp = readl(&udc->op_regs->usbcmd);
1059 tmp &= ~USBCMD_RUN_STOP;
1060 writel(tmp, &udc->op_regs->usbcmd);
1061
1062 /* Reset the controller to get default values */
1063 writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1064
1065 /* wait for reset to complete */
1066 loops = LOOPS(RESET_TIMEOUT);
1067 while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1068 if (loops == 0) {
1069 dev_err(&udc->dev->dev,
1070 "Wait for RESET completed TIMEOUT\n");
1071 return -ETIMEDOUT;
1072 }
1073 loops--;
1074 udelay(LOOPS_USEC);
1075 }
1076
1077 /* set controller to device mode */
1078 tmp = readl(&udc->op_regs->usbmode);
1079 tmp |= USBMODE_CTRL_MODE_DEVICE;
1080
1081 /* turn setup lockout off, require setup tripwire in usbcmd */
1082 tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
1083
1084 writel(tmp, &udc->op_regs->usbmode);
1085
1086 writel(0x0, &udc->op_regs->epsetupstat);
1087
1088 /* Configure the Endpoint List Address */
1089 writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1090 &udc->op_regs->eplistaddr);
1091
1092 portsc = readl(&udc->op_regs->portsc[0]);
1093 if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
 1094 portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);
1095
1096 if (udc->force_fs)
1097 portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1098 else
1099 portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1100
1101 writel(portsc, &udc->op_regs->portsc[0]);
1102
1103 tmp = readl(&udc->op_regs->epctrlx[0]);
1104 tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1105 writel(tmp, &udc->op_regs->epctrlx[0]);
1106
1107 return 0;
1108}
1109
1110static int mv_udc_get_frame(struct usb_gadget *gadget)
1111{
1112 struct mv_udc *udc;
1113 u16 retval;
1114
1115 if (!gadget)
1116 return -ENODEV;
1117
1118 udc = container_of(gadget, struct mv_udc, gadget);
1119
 1120 retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1121
1122 return retval;
1123}
1124
1125/* Tries to wake up the host connected to this gadget */
1126static int mv_udc_wakeup(struct usb_gadget *gadget)
1127{
1128 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1129 u32 portsc;
1130
1131 /* Remote wakeup feature not enabled by host */
1132 if (!udc->remote_wakeup)
1133 return -ENOTSUPP;
1134
 1135 portsc = readl(&udc->op_regs->portsc[0]);
1136 /* not suspended? */
1137 if (!(portsc & PORTSCX_PORT_SUSPEND))
1138 return 0;
1139 /* trigger force resume */
1140 portsc |= PORTSCX_PORT_FORCE_RESUME;
1141 writel(portsc, &udc->op_regs->portsc[0]);
1142 return 0;
1143}
1144
1145static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1146{
1147 struct mv_udc *udc;
1148 unsigned long flags;
1149
1150 udc = container_of(gadget, struct mv_udc, gadget);
1151 spin_lock_irqsave(&udc->lock, flags);
1152
1153 udc->softconnect = (is_on != 0);
1154 if (udc->driver && udc->softconnect)
1155 udc_start(udc);
1156 else
1157 udc_stop(udc);
1158
1159 spin_unlock_irqrestore(&udc->lock, flags);
1160 return 0;
1161}
1162
1163static int mv_udc_start(struct usb_gadget_driver *driver,
1164 int (*bind)(struct usb_gadget *));
1165static int mv_udc_stop(struct usb_gadget_driver *driver);
e7cddda4 1166/* device controller usb_gadget_ops structure */
1167static const struct usb_gadget_ops mv_ops = {
1168
1169 /* returns the current frame number */
1170 .get_frame = mv_udc_get_frame,
1171
1172 /* tries to wake up the host connected to this gadget */
1173 .wakeup = mv_udc_wakeup,
1174
1175 /* D+ pullup, software-controlled connect/disconnect to USB host */
1176 .pullup = mv_udc_pullup,
1177 .start = mv_udc_start,
1178 .stop = mv_udc_stop,
e7cddda4 1179};
1180
1181static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
1182{
 1183 dev_info(&udc->dev->dev, "Test mode is not supported yet\n");
1184}
1185
1186static int eps_init(struct mv_udc *udc)
1187{
1188 struct mv_ep *ep;
1189 char name[14];
1190 int i;
1191
1192 /* initialize ep0 */
1193 ep = &udc->eps[0];
1194 ep->udc = udc;
1195 strncpy(ep->name, "ep0", sizeof(ep->name));
1196 ep->ep.name = ep->name;
1197 ep->ep.ops = &mv_ep_ops;
1198 ep->wedge = 0;
1199 ep->stopped = 0;
1200 ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1201 ep->ep_num = 0;
1202 ep->desc = &mv_ep0_desc;
1203 INIT_LIST_HEAD(&ep->queue);
1204
1205 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1206
1207 /* initialize other endpoints */
1208 for (i = 2; i < udc->max_eps * 2; i++) {
1209 ep = &udc->eps[i];
1210 if (i % 2) {
1211 snprintf(name, sizeof(name), "ep%din", i / 2);
1212 ep->direction = EP_DIR_IN;
1213 } else {
1214 snprintf(name, sizeof(name), "ep%dout", i / 2);
1215 ep->direction = EP_DIR_OUT;
1216 }
1217 ep->udc = udc;
1218 strncpy(ep->name, name, sizeof(ep->name));
1219 ep->ep.name = ep->name;
1220
1221 ep->ep.ops = &mv_ep_ops;
1222 ep->stopped = 0;
1223 ep->ep.maxpacket = (unsigned short) ~0;
1224 ep->ep_num = i / 2;
1225
1226 INIT_LIST_HEAD(&ep->queue);
1227 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1228
1229 ep->dqh = &udc->ep_dqh[i];
1230 }
1231
1232 return 0;
1233}
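/*
 * The eps[] array interleaves directions: index = ep_num * 2 + direction
 * (assuming EP_DIR_OUT == 0 and EP_DIR_IN == 1, as the i % 2 logic
 * implies), mirroring the dQH layout the controller expects:
 *
 *	eps[0] / eps[1]  ->  ep0 OUT / ep0 IN
 *	eps[4]           ->  "ep2out"
 *	eps[5]           ->  "ep2in"
 *
 * This is why the interrupt path below derives ep_num = i >> 1 and
 * direction = i % 2.
 */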
1234
1235/* delete all endpoint requests, called with spinlock held */
1236static void nuke(struct mv_ep *ep, int status)
1237{
1238 /* called with spinlock held */
1239 ep->stopped = 1;
1240
1241 /* endpoint fifo flush */
1242 mv_ep_fifo_flush(&ep->ep);
1243
1244 while (!list_empty(&ep->queue)) {
1245 struct mv_req *req = NULL;
1246 req = list_entry(ep->queue.next, struct mv_req, queue);
1247 done(ep, req, status);
1248 }
1249}
1250
1251/* stop all USB activities */
1252static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1253{
1254 struct mv_ep *ep;
1255
1256 nuke(&udc->eps[0], -ESHUTDOWN);
1257
1258 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1259 nuke(ep, -ESHUTDOWN);
1260 }
1261
1262 /* report disconnect; the driver is already quiesced */
1263 if (driver) {
1264 spin_unlock(&udc->lock);
1265 driver->disconnect(&udc->gadget);
1266 spin_lock(&udc->lock);
1267 }
1268}
1269
0f91349b 1270static int mv_udc_start(struct usb_gadget_driver *driver,
e7cddda4 1271 int (*bind)(struct usb_gadget *))
1272{
1273 struct mv_udc *udc = the_controller;
1274 int retval = 0;
1275 unsigned long flags;
1276
1277 if (!udc)
1278 return -ENODEV;
1279
1280 if (udc->driver)
1281 return -EBUSY;
1282
1283 spin_lock_irqsave(&udc->lock, flags);
1284
1285 /* hook up the driver ... */
1286 driver->driver.bus = NULL;
1287 udc->driver = driver;
1288 udc->gadget.dev.driver = &driver->driver;
1289
1290 udc->usb_state = USB_STATE_ATTACHED;
1291 udc->ep0_state = WAIT_FOR_SETUP;
1292 udc->ep0_dir = USB_DIR_OUT;
1293
1294 spin_unlock_irqrestore(&udc->lock, flags);
1295
1296 retval = bind(&udc->gadget);
1297 if (retval) {
1298 dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
1299 driver->driver.name, retval);
1300 udc->driver = NULL;
1301 udc->gadget.dev.driver = NULL;
1302 return retval;
1303 }
1304 udc_reset(udc);
1305 ep0_reset(udc);
1306 udc_start(udc);
1307
1308 return 0;
1309}
e7cddda4 1310
0f91349b 1311static int mv_udc_stop(struct usb_gadget_driver *driver)
e7cddda4 1312{
1313 struct mv_udc *udc = the_controller;
1314 unsigned long flags;
1315
1316 if (!udc)
1317 return -ENODEV;
1318
1319 udc_stop(udc);
1320
1321 spin_lock_irqsave(&udc->lock, flags);
1322
1323 /* stop all usb activities */
1324 udc->gadget.speed = USB_SPEED_UNKNOWN;
1325 stop_activity(udc, driver);
1326 spin_unlock_irqrestore(&udc->lock, flags);
1327
1328 /* unbind gadget driver */
1329 driver->unbind(&udc->gadget);
1330 udc->gadget.dev.driver = NULL;
1331 udc->driver = NULL;
1332
1333 return 0;
1334}
e7cddda4 1335
1336static int
1337udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1338{
1339 int retval = 0;
1340 struct mv_req *req;
1341 struct mv_ep *ep;
1342
1343 ep = &udc->eps[0];
1344 udc->ep0_dir = direction;
1345
1346 req = udc->status_req;
1347
 1348 /* fill in the request structure */
1349 if (empty == false) {
1350 *((u16 *) req->req.buf) = cpu_to_le16(status);
1351 req->req.length = 2;
1352 } else
1353 req->req.length = 0;
1354
1355 req->ep = ep;
1356 req->req.status = -EINPROGRESS;
1357 req->req.actual = 0;
1358 req->req.complete = NULL;
1359 req->dtd_count = 0;
1360
1361 /* prime the data phase */
1362 if (!req_to_dtd(req))
1363 retval = queue_dtd(ep, req);
 1364 else { /* no mem */
1365 retval = -ENOMEM;
1366 goto out;
1367 }
1368
1369 if (retval) {
1370 dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
1371 goto out;
1372 }
1373
1374 list_add_tail(&req->queue, &ep->queue);
1375
1376 return 0;
1377out:
1378 return retval;
1379}
1380
1381static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1382{
1383 udc->dev_addr = (u8)setup->wValue;
1384
1385 /* update usb state */
1386 udc->usb_state = USB_STATE_ADDRESS;
1387
1388 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1389 ep0_stall(udc);
1390}
1391
1392static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1393 struct usb_ctrlrequest *setup)
1394{
 1395 u16 status = 0;
1396 int retval;
1397
1398 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1399 != (USB_DIR_IN | USB_TYPE_STANDARD))
1400 return;
1401
1402 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1403 status = 1 << USB_DEVICE_SELF_POWERED;
1404 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1405 } else if ((setup->bRequestType & USB_RECIP_MASK)
1406 == USB_RECIP_INTERFACE) {
1407 /* get interface status */
1408 status = 0;
1409 } else if ((setup->bRequestType & USB_RECIP_MASK)
1410 == USB_RECIP_ENDPOINT) {
1411 u8 ep_num, direction;
1412
1413 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1414 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1415 ? EP_DIR_IN : EP_DIR_OUT;
1416 status = ep_is_stall(udc, ep_num, direction)
1417 << USB_ENDPOINT_HALT;
1418 }
1419
1420 retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1421 if (retval)
1422 ep0_stall(udc);
1423}
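/*
 * Example GET_STATUS replies built above (two bytes, sent little-endian
 * by udc_prime_status(); values illustrative):
 *
 *	device:    bit 0 = self-powered (hard-wired to 1 here),
 *	           bit 1 = remote wakeup  ->  0x0003 when wakeup is armed
 *	interface: always 0x0000
 *	endpoint:  bit 0 = halt           ->  0x0001 for a stalled ep
 */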
1424
1425static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1426{
1427 u8 ep_num;
1428 u8 direction;
1429 struct mv_ep *ep;
1430
1431 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1432 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1433 switch (setup->wValue) {
1434 case USB_DEVICE_REMOTE_WAKEUP:
1435 udc->remote_wakeup = 0;
1436 break;
1437 case USB_DEVICE_TEST_MODE:
1438 mv_udc_testmode(udc, 0, false);
1439 break;
1440 default:
1441 goto out;
1442 }
1443 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1444 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1445 switch (setup->wValue) {
1446 case USB_ENDPOINT_HALT:
1447 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1448 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1449 ? EP_DIR_IN : EP_DIR_OUT;
1450 if (setup->wValue != 0 || setup->wLength != 0
1451 || ep_num > udc->max_eps)
1452 goto out;
1453 ep = &udc->eps[ep_num * 2 + direction];
1454 if (ep->wedge == 1)
1455 break;
1456 spin_unlock(&udc->lock);
1457 ep_set_stall(udc, ep_num, direction, 0);
1458 spin_lock(&udc->lock);
1459 break;
1460 default:
1461 goto out;
1462 }
1463 } else
1464 goto out;
1465
1466 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1467 ep0_stall(udc);
1468 else
1469 udc->ep0_state = DATA_STATE_XMIT;
1470out:
1471 return;
1472}
1473
1474static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1475{
1476 u8 ep_num;
1477 u8 direction;
1478
1479 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1480 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1481 switch (setup->wValue) {
1482 case USB_DEVICE_REMOTE_WAKEUP:
1483 udc->remote_wakeup = 1;
1484 break;
1485 case USB_DEVICE_TEST_MODE:
1486 if (setup->wIndex & 0xFF
1487 && udc->gadget.speed != USB_SPEED_HIGH)
1488 goto out;
1489 if (udc->usb_state == USB_STATE_CONFIGURED
1490 || udc->usb_state == USB_STATE_ADDRESS
1491 || udc->usb_state == USB_STATE_DEFAULT)
1492 mv_udc_testmode(udc,
1493 setup->wIndex & 0xFF00, true);
1494 else
1495 goto out;
1496 break;
1497 default:
1498 goto out;
1499 }
1500 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1501 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1502 switch (setup->wValue) {
1503 case USB_ENDPOINT_HALT:
1504 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1505 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1506 ? EP_DIR_IN : EP_DIR_OUT;
1507 if (setup->wValue != 0 || setup->wLength != 0
1508 || ep_num > udc->max_eps)
1509 goto out;
1510 spin_unlock(&udc->lock);
1511 ep_set_stall(udc, ep_num, direction, 1);
1512 spin_lock(&udc->lock);
1513 break;
1514 default:
1515 goto out;
1516 }
1517 } else
1518 goto out;
1519
1520 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1521 ep0_stall(udc);
1522out:
1523 return;
1524}
1525
1526static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
1527 struct usb_ctrlrequest *setup)
1528{
1529 bool delegate = false;
1530
1531 nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
1532
1533 dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1534 setup->bRequestType, setup->bRequest,
1535 setup->wValue, setup->wIndex, setup->wLength);
 1536 /* We process some standard setup requests here */
1537 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1538 switch (setup->bRequest) {
1539 case USB_REQ_GET_STATUS:
1540 ch9getstatus(udc, ep_num, setup);
1541 break;
1542
1543 case USB_REQ_SET_ADDRESS:
1544 ch9setaddress(udc, setup);
1545 break;
1546
1547 case USB_REQ_CLEAR_FEATURE:
1548 ch9clearfeature(udc, setup);
1549 break;
1550
1551 case USB_REQ_SET_FEATURE:
1552 ch9setfeature(udc, setup);
1553 break;
1554
1555 default:
1556 delegate = true;
1557 }
1558 } else
1559 delegate = true;
1560
 1561 /* delegate the remaining requests to the gadget driver */
1562 if (delegate == true) {
1563 /* USB requests handled by gadget */
1564 if (setup->wLength) {
1565 /* DATA phase from gadget, STATUS phase from udc */
1566 udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1567 ? EP_DIR_IN : EP_DIR_OUT;
1568 spin_unlock(&udc->lock);
1569 if (udc->driver->setup(&udc->gadget,
1570 &udc->local_setup_buff) < 0)
1571 ep0_stall(udc);
1572 spin_lock(&udc->lock);
1573 udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1574 ? DATA_STATE_XMIT : DATA_STATE_RECV;
1575 } else {
1576 /* no DATA phase, IN STATUS phase from gadget */
1577 udc->ep0_dir = EP_DIR_IN;
1578 spin_unlock(&udc->lock);
1579 if (udc->driver->setup(&udc->gadget,
1580 &udc->local_setup_buff) < 0)
1581 ep0_stall(udc);
1582 spin_lock(&udc->lock);
1583 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1584 }
1585 }
1586}
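/*
 * A sketch of the ep0 state flow implemented above:
 *
 *	SETUP, wLength > 0, IN :  driver->setup() queues the data
 *	    -> DATA_STATE_XMIT; ep0_req_complete() then primes OUT status
 *	SETUP, wLength > 0, OUT:  -> DATA_STATE_RECV; then IN status
 *	SETUP, wLength == 0    :  gadget queues the IN status itself
 *	    -> WAIT_FOR_OUT_STATUS, and back to WAIT_FOR_SETUP on
 *	       completion (see ep0_req_complete() below)
 */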
1587
 1588/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
1589static void ep0_req_complete(struct mv_udc *udc,
1590 struct mv_ep *ep0, struct mv_req *req)
1591{
1592 u32 new_addr;
1593
1594 if (udc->usb_state == USB_STATE_ADDRESS) {
1595 /* set the new address */
1596 new_addr = (u32)udc->dev_addr;
1597 writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1598 &udc->op_regs->deviceaddr);
1599 }
1600
1601 done(ep0, req, 0);
1602
1603 switch (udc->ep0_state) {
1604 case DATA_STATE_XMIT:
1605 /* receive status phase */
1606 if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1607 ep0_stall(udc);
1608 break;
1609 case DATA_STATE_RECV:
1610 /* send status phase */
 1611 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1612 ep0_stall(udc);
1613 break;
1614 case WAIT_FOR_OUT_STATUS:
1615 udc->ep0_state = WAIT_FOR_SETUP;
1616 break;
1617 case WAIT_FOR_SETUP:
 1618 dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
1619 break;
1620 default:
1621 ep0_stall(udc);
1622 break;
1623 }
1624}
1625
1626static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
1627{
1628 u32 temp;
1629 struct mv_dqh *dqh;
1630
1631 dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
1632
1633 /* Clear bit in ENDPTSETUPSTAT */
96c2bbb0 1634 writel((1 << ep_num), &udc->op_regs->epsetupstat);
e7cddda4 1635
 1636 /* retry while a hazard exists: a new setup packet may overwrite the buffer */
1637 do {
1638 /* Set Setup Tripwire */
1639 temp = readl(&udc->op_regs->usbcmd);
1640 writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1641
1642 /* Copy the setup packet to local buffer */
1643 memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
1644 } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
1645
1646 /* Clear Setup Tripwire */
1647 temp = readl(&udc->op_regs->usbcmd);
1648 writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1649}
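/*
 * The SUTW (setup tripwire) loop above guards an 8-byte copy that the
 * hardware may overwrite at any time: if a new SETUP packet lands in
 * dqh->setup_buffer while the memcpy() runs, the controller clears
 * SUTW, the while condition fails and the copy is retried, so the
 * caller never sees a torn setup packet. This relies on
 * USBMODE_SETUP_LOCK_OFF having been set in udc_reset().
 */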
1650
1651static void irq_process_tr_complete(struct mv_udc *udc)
1652{
1653 u32 tmp, bit_pos;
1654 int i, ep_num = 0, direction = 0;
1655 struct mv_ep *curr_ep;
1656 struct mv_req *curr_req, *temp_req;
1657 int status;
1658
1659 /*
1660 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
1661 * because the setup packets are to be read ASAP
1662 */
1663
1664 /* Process all Setup packet received interrupts */
1665 tmp = readl(&udc->op_regs->epsetupstat);
1666
1667 if (tmp) {
1668 for (i = 0; i < udc->max_eps; i++) {
1669 if (tmp & (1 << i)) {
1670 get_setup_data(udc, i,
1671 (u8 *)(&udc->local_setup_buff));
1672 handle_setup_packet(udc, i,
1673 &udc->local_setup_buff);
1674 }
1675 }
1676 }
1677
1678 /* Don't clear the endpoint setup status register here.
1679 * It is cleared as a setup packet is read out of the buffer
1680 */
1681
1682 /* Process non-setup transaction complete interrupts */
1683 tmp = readl(&udc->op_regs->epcomplete);
1684
1685 if (!tmp)
1686 return;
1687
1688 writel(tmp, &udc->op_regs->epcomplete);
1689
1690 for (i = 0; i < udc->max_eps * 2; i++) {
1691 ep_num = i >> 1;
1692 direction = i % 2;
1693
1694 bit_pos = 1 << (ep_num + 16 * direction);
1695
1696 if (!(bit_pos & tmp))
1697 continue;
1698
1699 if (i == 1)
1700 curr_ep = &udc->eps[0];
1701 else
1702 curr_ep = &udc->eps[i];
 1703 /* process the req queue until an incomplete request is found */
1704 list_for_each_entry_safe(curr_req, temp_req,
1705 &curr_ep->queue, queue) {
1706 status = process_ep_req(udc, i, curr_req);
1707 if (status)
1708 break;
1709
1710 /* write back status to req */
1711 curr_req->req.status = status;
1712
1713 /* ep0 request completion */
1714 if (ep_num == 0) {
1715 ep0_req_complete(udc, curr_ep, curr_req);
1716 break;
1717 } else {
1718 done(curr_ep, curr_req, status);
1719 }
1720 }
1721 }
1722}
1723
 1724static void irq_process_reset(struct mv_udc *udc)
1725{
1726 u32 tmp;
1727 unsigned int loops;
1728
1729 udc->ep0_dir = EP_DIR_OUT;
1730 udc->ep0_state = WAIT_FOR_SETUP;
1731 udc->remote_wakeup = 0; /* default to 0 on reset */
1732
 1733 /* The address occupies bits 25-31. Clear the device address */
1734 tmp = readl(&udc->op_regs->deviceaddr);
1735 tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1736 writel(tmp, &udc->op_regs->deviceaddr);
1737
1738 /* Clear all the setup token semaphores */
1739 tmp = readl(&udc->op_regs->epsetupstat);
1740 writel(tmp, &udc->op_regs->epsetupstat);
1741
1742 /* Clear all the endpoint complete status bits */
1743 tmp = readl(&udc->op_regs->epcomplete);
1744 writel(tmp, &udc->op_regs->epcomplete);
1745
1746 /* wait until all endptprime bits cleared */
1747 loops = LOOPS(PRIME_TIMEOUT);
1748 while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1749 if (loops == 0) {
1750 dev_err(&udc->dev->dev,
1751 "Timeout for ENDPTPRIME = 0x%x\n",
1752 readl(&udc->op_regs->epprime));
1753 break;
1754 }
1755 loops--;
1756 udelay(LOOPS_USEC);
1757 }
1758
1759 /* Write 1s to the Flush register */
1760 writel((u32)~0, &udc->op_regs->epflush);
1761
1762 if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1763 dev_info(&udc->dev->dev, "usb bus reset\n");
1764 udc->usb_state = USB_STATE_DEFAULT;
1765 /* reset all the queues, stop all USB activities */
1766 stop_activity(udc, udc->driver);
1767 } else {
1768 dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
 1769 readl(&udc->op_regs->portsc[0]));
1770
1771 /*
 1772 * reset complete:
 1773 * re-initialize the controller
1774 */
1775 udc_reset(udc);
1776
1777 /* reset all the queues, stop all USB activities */
1778 stop_activity(udc, udc->driver);
1779
1780 /* reset ep0 dQH and endptctrl */
1781 ep0_reset(udc);
1782
1783 /* enable interrupt and set controller to run state */
1784 udc_start(udc);
1785
1786 udc->usb_state = USB_STATE_ATTACHED;
1787 }
1788}
1789
1790static void handle_bus_resume(struct mv_udc *udc)
1791{
1792 udc->usb_state = udc->resume_state;
1793 udc->resume_state = 0;
1794
1795 /* report resume to the driver */
1796 if (udc->driver) {
1797 if (udc->driver->resume) {
1798 spin_unlock(&udc->lock);
1799 udc->driver->resume(&udc->gadget);
1800 spin_lock(&udc->lock);
1801 }
1802 }
1803}
1804
1805static void irq_process_suspend(struct mv_udc *udc)
1806{
1807 udc->resume_state = udc->usb_state;
1808 udc->usb_state = USB_STATE_SUSPENDED;
1809
1810 if (udc->driver->suspend) {
1811 spin_unlock(&udc->lock);
1812 udc->driver->suspend(&udc->gadget);
1813 spin_lock(&udc->lock);
1814 }
1815}
1816
1817static void irq_process_port_change(struct mv_udc *udc)
1818{
1819 u32 portsc;
1820
1821 portsc = readl(&udc->op_regs->portsc[0]);
1822 if (!(portsc & PORTSCX_PORT_RESET)) {
1823 /* Get the speed */
1824 u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1825 switch (speed) {
1826 case PORTSCX_PORT_SPEED_HIGH:
1827 udc->gadget.speed = USB_SPEED_HIGH;
1828 break;
1829 case PORTSCX_PORT_SPEED_FULL:
1830 udc->gadget.speed = USB_SPEED_FULL;
1831 break;
1832 case PORTSCX_PORT_SPEED_LOW:
1833 udc->gadget.speed = USB_SPEED_LOW;
1834 break;
1835 default:
1836 udc->gadget.speed = USB_SPEED_UNKNOWN;
1837 break;
1838 }
1839 }
1840
1841 if (portsc & PORTSCX_PORT_SUSPEND) {
1842 udc->resume_state = udc->usb_state;
1843 udc->usb_state = USB_STATE_SUSPENDED;
1844 if (udc->driver->suspend) {
1845 spin_unlock(&udc->lock);
1846 udc->driver->suspend(&udc->gadget);
1847 spin_lock(&udc->lock);
1848 }
1849 }
1850
1851 if (!(portsc & PORTSCX_PORT_SUSPEND)
1852 && udc->usb_state == USB_STATE_SUSPENDED) {
1853 handle_bus_resume(udc);
1854 }
1855
1856 if (!udc->resume_state)
1857 udc->usb_state = USB_STATE_DEFAULT;
1858}
1859
1860static void irq_process_error(struct mv_udc *udc)
1861{
1862 /* Increment the error count */
1863 udc->errors++;
1864}
1865
1866static irqreturn_t mv_udc_irq(int irq, void *dev)
1867{
1868 struct mv_udc *udc = (struct mv_udc *)dev;
1869 u32 status, intr;
1870
1871 spin_lock(&udc->lock);
1872
1873 status = readl(&udc->op_regs->usbsts);
1874 intr = readl(&udc->op_regs->usbintr);
1875 status &= intr;
1876
1877 if (status == 0) {
1878 spin_unlock(&udc->lock);
1879 return IRQ_NONE;
1880 }
1881
25985edc 1882 /* Clear all pending interrupts */
e7cddda4 1883 writel(status, &udc->op_regs->usbsts);
1884
1885 if (status & USBSTS_ERR)
1886 irq_process_error(udc);
1887
1888 if (status & USBSTS_RESET)
1889 irq_process_reset(udc);
1890
1891 if (status & USBSTS_PORT_CHANGE)
1892 irq_process_port_change(udc);
1893
1894 if (status & USBSTS_INT)
1895 irq_process_tr_complete(udc);
1896
1897 if (status & USBSTS_SUSPEND)
1898 irq_process_suspend(udc);
1899
1900 spin_unlock(&udc->lock);
1901
1902 return IRQ_HANDLED;
1903}
1904
1905/* release device structure */
1906static void gadget_release(struct device *_dev)
1907{
1908 struct mv_udc *udc = the_controller;
1909
1910 complete(udc->done);
e7cddda4 1911}
1912
5d0b8d0f 1913static int __devexit mv_udc_remove(struct platform_device *dev)
e7cddda4 1914{
1915 struct mv_udc *udc = the_controller;
dde34cc5 1916 int clk_i;
e7cddda4 1917
1918 usb_del_gadget_udc(&udc->gadget);
1919
e7cddda4 1920 /* free memory allocated in probe */
1921 if (udc->dtd_pool)
1922 dma_pool_destroy(udc->dtd_pool);
1923
1924 if (udc->ep_dqh)
1925 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
1926 udc->ep_dqh, udc->ep_dqh_dma);
1927
1928 kfree(udc->eps);
1929
1930 if (udc->irq)
 1931 free_irq(udc->irq, udc);
1932
1933 if (udc->cap_regs)
1934 iounmap(udc->cap_regs);
1935 udc->cap_regs = NULL;
1936
1937 if (udc->phy_regs)
1938 iounmap((void *)udc->phy_regs);
1939 udc->phy_regs = 0;
1940
1941 if (udc->status_req) {
1942 kfree(udc->status_req->req.buf);
1943 kfree(udc->status_req);
1944 }
1945
 1946 for (clk_i = 0; clk_i < udc->clknum; clk_i++)
1947 clk_put(udc->clk[clk_i]);
1948
e7cddda4 1949 device_unregister(&udc->gadget.dev);
1950
1951 /* free dev, wait for the release() finished */
1952 wait_for_completion(udc->done);
1953 kfree(udc);
e7cddda4 1954
1955 the_controller = NULL;
1956
1957 return 0;
1958}
1959
5d0b8d0f 1960static int __devinit mv_udc_probe(struct platform_device *dev)
e7cddda4 1961{
dde34cc5 1962 struct mv_usb_platform_data *pdata = dev->dev.platform_data;
e7cddda4 1963 struct mv_udc *udc;
1964 int retval = 0;
dde34cc5 1965 int clk_i = 0;
e7cddda4 1966 struct resource *r;
1967 size_t size;
1968
1969 if (pdata == NULL) {
1970 dev_err(&dev->dev, "missing platform_data\n");
1971 return -ENODEV;
1972 }
1973
1974 size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
1975 udc = kzalloc(size, GFP_KERNEL);
e7cddda4 1976 if (udc == NULL) {
1977 dev_err(&dev->dev, "failed to allocate memory for udc\n");
dde34cc5 1978 return -ENOMEM;
e7cddda4 1979 }
1980
1981 the_controller = udc;
1982 udc->done = &release_done;
1983 udc->pdata = dev->dev.platform_data;
e7cddda4 1984 spin_lock_init(&udc->lock);
1985
1986 udc->dev = dev;
1987
1988 udc->clknum = pdata->clknum;
1989 for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
1990 udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
1991 if (IS_ERR(udc->clk[clk_i])) {
1992 retval = PTR_ERR(udc->clk[clk_i]);
1993 goto err_put_clk;
1994 }
e7cddda4 1995 }
1996
dde34cc5 1997 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
e7cddda4 1998 if (r == NULL) {
1999 dev_err(&dev->dev, "no I/O memory resource defined\n");
2000 retval = -ENODEV;
dde34cc5 2001 goto err_put_clk;
e7cddda4 2002 }
2003
2004 udc->cap_regs = (struct mv_cap_regs __iomem *)
2005 ioremap(r->start, resource_size(r));
2006 if (udc->cap_regs == NULL) {
2007 dev_err(&dev->dev, "failed to map I/O memory\n");
2008 retval = -EBUSY;
dde34cc5 2009 goto err_put_clk;
e7cddda4 2010 }
2011
dde34cc5 2012 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
e7cddda4 2013 if (r == NULL) {
2014 dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2015 retval = -ENODEV;
dde34cc5 2016 goto err_iounmap_capreg;
e7cddda4 2017 }
2018
2019 udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
2020 if (udc->phy_regs == 0) {
2021 dev_err(&dev->dev, "failed to map phy I/O memory\n");
2022 retval = -EBUSY;
dde34cc5 2023 goto err_iounmap_capreg;
e7cddda4 2024 }
2025
 2026 /* we will access the controller registers, so enable the clock */
2027 udc_clock_enable(udc);
2028 if (pdata->phy_init) {
2029 retval = pdata->phy_init(udc->phy_regs);
2030 if (retval) {
2031 dev_err(&dev->dev, "phy init error %d\n", retval);
2032 goto err_iounmap_phyreg;
2033 }
e7cddda4 2034 }
2035
2036 udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
2037 + (readl(&udc->cap_regs->caplength_hciversion)
2038 & CAPLENGTH_MASK));
2039 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2040
2041 /*
 2042 * Some platforms use USB to download the boot image and may not
 2043 * disconnect the USB gadget before loading the kernel, so stop the udc here first.
2044 */
2045 udc_stop(udc);
2046 writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2047
e7cddda4 2048 size = udc->max_eps * sizeof(struct mv_dqh) * 2;
2049 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2050 udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2051 &udc->ep_dqh_dma, GFP_KERNEL);
2052
2053 if (udc->ep_dqh == NULL) {
2054 dev_err(&dev->dev, "allocate dQH memory failed\n");
2055 retval = -ENOMEM;
dde34cc5 2056 goto err_disable_clock;
e7cddda4 2057 }
2058 udc->ep_dqh_size = size;
2059
2060 /* create dTD dma_pool resource */
2061 udc->dtd_pool = dma_pool_create("mv_dtd",
2062 &dev->dev,
2063 sizeof(struct mv_dtd),
2064 DTD_ALIGNMENT,
2065 DMA_BOUNDARY);
2066
2067 if (!udc->dtd_pool) {
2068 retval = -ENOMEM;
dde34cc5 2069 goto err_free_dma;
e7cddda4 2070 }
2071
 2072 size = udc->max_eps * sizeof(struct mv_ep) * 2;
2073 udc->eps = kzalloc(size, GFP_KERNEL);
2074 if (udc->eps == NULL) {
2075 dev_err(&dev->dev, "allocate ep memory failed\n");
2076 retval = -ENOMEM;
dde34cc5 2077 goto err_destroy_dma;
e7cddda4 2078 }
2079
2080 /* initialize ep0 status request structure */
2081 udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2082 if (!udc->status_req) {
2083 dev_err(&dev->dev, "allocate status_req memory failed\n");
2084 retval = -ENOMEM;
dde34cc5 2085 goto err_free_eps;
e7cddda4 2086 }
2087 INIT_LIST_HEAD(&udc->status_req->queue);
2088
2089 /* allocate a small amount of memory to get valid address */
2090 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2091 udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
2092
2093 udc->resume_state = USB_STATE_NOTATTACHED;
2094 udc->usb_state = USB_STATE_POWERED;
2095 udc->ep0_dir = EP_DIR_OUT;
2096 udc->remote_wakeup = 0;
2097
2098 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2099 if (r == NULL) {
2100 dev_err(&dev->dev, "no IRQ resource defined\n");
2101 retval = -ENODEV;
dde34cc5 2102 goto err_free_status_req;
e7cddda4 2103 }
2104 udc->irq = r->start;
2105 if (request_irq(udc->irq, mv_udc_irq,
b5dd18d8 2106 IRQF_SHARED, driver_name, udc)) {
e7cddda4 2107 dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2108 udc->irq);
2109 retval = -ENODEV;
dde34cc5 2110 goto err_free_status_req;
e7cddda4 2111 }
2112
2113 /* initialize gadget structure */
2114 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
2115 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
2116 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
2117 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
2118 udc->gadget.is_dualspeed = 1; /* support dual speed */
2119
2120 /* the "gadget" abstracts/virtualizes the controller */
2121 dev_set_name(&udc->gadget.dev, "gadget");
2122 udc->gadget.dev.parent = &dev->dev;
2123 udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2124 udc->gadget.dev.release = gadget_release;
2125 udc->gadget.name = driver_name; /* gadget name */
2126
2127 retval = device_register(&udc->gadget.dev);
2128 if (retval)
dde34cc5 2129 goto err_free_irq;
e7cddda4 2130
2131 eps_init(udc);
2132
0f91349b 2133 retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
2134 if (retval)
2135 goto err_unregister;
2136
2137 return 0;
2138
2139err_unregister:
2140 device_unregister(&udc->gadget.dev);
2141err_free_irq:
 2142 free_irq(udc->irq, udc);
2143err_free_status_req:
2144 kfree(udc->status_req->req.buf);
2145 kfree(udc->status_req);
2146err_free_eps:
2147 kfree(udc->eps);
2148err_destroy_dma:
2149 dma_pool_destroy(udc->dtd_pool);
2150err_free_dma:
2151 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2152 udc->ep_dqh, udc->ep_dqh_dma);
2153err_disable_clock:
2154 if (udc->pdata->phy_deinit)
2155 udc->pdata->phy_deinit(udc->phy_regs);
2156 udc_clock_disable(udc);
2157err_iounmap_phyreg:
2158 iounmap((void *)udc->phy_regs);
2159err_iounmap_capreg:
2160 iounmap(udc->cap_regs);
2161err_put_clk:
2162 for (clk_i--; clk_i >= 0; clk_i--)
2163 clk_put(udc->clk[clk_i]);
2164 the_controller = NULL;
2165 kfree(udc);
e7cddda4 2166 return retval;
2167}
2168
2169#ifdef CONFIG_PM
cb424473 2170static int mv_udc_suspend(struct device *_dev)
e7cddda4 2171{
2172 struct mv_udc *udc = the_controller;
2173
2174 udc_stop(udc);
2175
2176 return 0;
2177}
2178
cb424473 2179static int mv_udc_resume(struct device *_dev)
e7cddda4 2180{
2181 struct mv_udc *udc = the_controller;
2182 int retval;
2183
dde34cc5
NZ
2184 if (udc->pdata->phy_init) {
2185 retval = udc->pdata->phy_init(udc->phy_regs);
2186 if (retval) {
2187 dev_err(&udc->dev->dev,
2188 "init phy error %d when resume back\n",
2189 retval);
2190 return retval;
2191 }
e7cddda4 2192 }
dde34cc5 2193
e7cddda4 2194 udc_reset(udc);
2195 ep0_reset(udc);
2196 udc_start(udc);
2197
2198 return 0;
2199}
2200
2201static const struct dev_pm_ops mv_udc_pm_ops = {
2202 .suspend = mv_udc_suspend,
2203 .resume = mv_udc_resume,
2204};
2205#endif
2206
2207static struct platform_driver udc_driver = {
2208 .probe = mv_udc_probe,
 2209 .remove = __devexit_p(mv_udc_remove),
2210 .driver = {
2211 .owner = THIS_MODULE,
2212 .name = "pxa-u2o",
2213#ifdef CONFIG_PM
cb424473 2214 .pm = &mv_udc_pm_ops,
e7cddda4 2215#endif
2216 },
2217};
86081d7b 2218MODULE_ALIAS("platform:pxa-u2o");
e7cddda4 2219
2220MODULE_DESCRIPTION(DRIVER_DESC);
2221MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2222MODULE_VERSION(DRIVER_VERSION);
2223MODULE_LICENSE("GPL");
2224
2225
2226static int __init init(void)
2227{
2228 return platform_driver_register(&udc_driver);
2229}
2230module_init(init);
2231
2232
2233static void __exit cleanup(void)
2234{
2235 platform_driver_unregister(&udc_driver);
2236}
2237module_exit(cleanup);
2238