usb: gadget: mv_udc: add iso support
[deliverable/linux.git] / drivers / usb / gadget / mv_udc_core.c
CommitLineData
dde34cc5
NZ
1/*
2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 * Author: Chao Xie <chao.xie@marvell.com>
4 * Neil Zhang <zhangwm@marvell.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
e7cddda4 12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/ioport.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/errno.h>
ded017ee 22#include <linux/err.h>
e7cddda4 23#include <linux/init.h>
24#include <linux/timer.h>
25#include <linux/list.h>
26#include <linux/interrupt.h>
27#include <linux/moduleparam.h>
28#include <linux/device.h>
29#include <linux/usb/ch9.h>
30#include <linux/usb/gadget.h>
31#include <linux/usb/otg.h>
32#include <linux/pm.h>
33#include <linux/io.h>
34#include <linux/irq.h>
35#include <linux/platform_device.h>
36#include <linux/clk.h>
dde34cc5 37#include <linux/platform_data/mv_usb.h>
e7cddda4 38#include <asm/unaligned.h>
39
40#include "mv_udc.h"
41
42#define DRIVER_DESC "Marvell PXA USB Device Controller driver"
43#define DRIVER_VERSION "8 Nov 2010"
44
45#define ep_dir(ep) (((ep)->ep_num == 0) ? \
46 ((ep)->udc->ep0_dir) : ((ep)->direction))
47
48/* timeout value -- usec */
49#define RESET_TIMEOUT 10000
50#define FLUSH_TIMEOUT 10000
51#define EPSTATUS_TIMEOUT 10000
52#define PRIME_TIMEOUT 10000
53#define READSAFE_TIMEOUT 1000
e7cddda4 54
9b2035a0 55#define LOOPS_USEC_SHIFT 1
e7cddda4 56#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
57#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
58
dde34cc5
NZ
59static DECLARE_COMPLETION(release_done);
60
e7cddda4 61static const char driver_name[] = "mv_udc";
62static const char driver_desc[] = DRIVER_DESC;
63
64/* controller device global variable */
65static struct mv_udc *the_controller;
e7cddda4 66
67static void nuke(struct mv_ep *ep, int status);
1aec033b 68static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
e7cddda4 69
70/* for endpoint 0 operations */
71static const struct usb_endpoint_descriptor mv_ep0_desc = {
72 .bLength = USB_DT_ENDPOINT_SIZE,
73 .bDescriptorType = USB_DT_ENDPOINT,
74 .bEndpointAddress = 0,
75 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
76 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
77};
78
/*
 * ep0_reset() - reinitialize both directions of endpoint 0.
 *
 * Rebuilds the two ep0 queue heads (index 0 = OUT/RX, index 1 = IN/TX),
 * sets Interrupt-On-Setup in the dQH, terminates the dTD pointer, and
 * enables both directions as CONTROL type in EPCTRL0.
 */
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		/* no dTD linked yet: mark the next pointer as terminate */
		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX (IN direction) */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX (OUT direction) */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
115
116/* protocol ep0 stall, will automatically be cleared on new transaction */
/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32 epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state: next event we expect is a fresh SETUP (OUT) */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}
130
/*
 * process_ep_req() - examine the dTD chain of a request after an
 * endpoint completion interrupt and compute the transferred length.
 *
 * @index:    dQH index (ep_num * 2 + direction; odd index = IN/TX)
 * @curr_req: request whose dTD chain is inspected
 *
 * Returns 1 if any dTD is still active (request not finished yet),
 * a negative errno on a hardware-reported error (-EPIPE halt,
 * -EPROTO buffer error / short TX, -EILSEQ transaction error),
 * or 0 on success with curr_req->req.actual updated.
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd *curr_dtd;
	struct mv_dqh *curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	/* start from the full length and subtract what remains per dTD */
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				/* a short TX is a protocol error; a short
				 * RX just ends the transfer early */
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/*
	 * If the dQH still points at the last dTD we processed, wait for
	 * the controller to move past it (or, at end of chain, for the
	 * endpoint status bit to clear) before reporting completion.
	 * NOTE(review): unbounded busy-wait -- relies on the hardware
	 * making progress.
	 */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
211
212/*
213 * done() - retire a request; caller blocked irqs
214 * @status : request status to be set, only works when
215 * request is still in progress.
216 */
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 *
 * Unlinks the request from the endpoint queue, returns every dTD of
 * its chain to the DMA pool, unmaps (or syncs) the data buffer, and
 * invokes the gadget completion callback.  udc->lock is dropped around
 * the callback because gadget drivers may requeue from it; ep->stopped
 * is forced to 1 meanwhile and restored afterwards.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	/* unmap if we mapped the buffer in ep_queue(); otherwise only
	 * hand ownership back to the CPU */
	if (req->mapped) {
		dma_unmap_single(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	} else
		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
274
/*
 * queue_dtd() - hand a request's dTD chain to the controller.
 *
 * If the endpoint queue already has requests, the new chain is appended
 * to the last dTD and the ATDTW tripwire protocol is used to detect
 * whether the endpoint is still primed (in which case the hardware will
 * pick up the appended dTDs on its own).  Otherwise the dQH is pointed
 * at the new chain and the endpoint is primed explicitly.
 *
 * Caller must hold udc->lock.  Returns 0 or -ETIME if the tripwire
 * semaphore never sticks.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		/* the link must be visible before we test the prime bit */
		wmb();

		/* still primed: hardware will follow the new link itself */
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		/* endpoint was busy while we sampled: no need to re-prime */
		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occure before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
357
/*
 * build_dtd() - allocate and fill one dTD for the next slice of @req.
 *
 * @length:  out - bytes covered by this dTD
 * @dma:     out - DMA address of the new dTD
 * @is_last: out - 1 when this dTD finishes the request (including the
 *           zero-length-packet rule when req->req.zero is set)
 *
 * For isochronous endpoints the slice is capped at mult * maxpacket
 * (mult read back from the dQH); otherwise at EP_MAX_LENGTH_TRANSFER.
 * Returns NULL when the dTD DMA pool is exhausted.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers (5 consecutive 4K pages) */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	/* MULT field: iso high-bandwidth transactions per microframe
	 * (0 for non-iso endpoints) */
	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	/* dTD must be fully written before it is linked/primed */
	mb();

	return dtd;
}
425
426/* generate dTD linked list for a request */
427static int req_to_dtd(struct mv_req *req)
428{
429 unsigned count;
430 int is_last, is_first = 1;
431 struct mv_dtd *dtd, *last_dtd = NULL;
432 struct mv_udc *udc;
433 dma_addr_t dma;
434
435 udc = req->ep->udc;
436
437 do {
438 dtd = build_dtd(req, &count, &dma, &is_last);
439 if (dtd == NULL)
440 return -ENOMEM;
441
442 if (is_first) {
443 is_first = 0;
444 req->head = dtd;
445 } else {
446 last_dtd->dtd_next = dma;
447 last_dtd->next_dtd_virt = dtd;
448 }
449 last_dtd = dtd;
450 req->dtd_count++;
451 } while (!is_last);
452
453 /* set terminate bit to 1 for the last dTD */
454 dtd->dtd_next = DTD_NEXT_TERMINATE;
455
456 req->tail = dtd;
457
458 return 0;
459}
460
461static int mv_ep_enable(struct usb_ep *_ep,
462 const struct usb_endpoint_descriptor *desc)
463{
464 struct mv_udc *udc;
465 struct mv_ep *ep;
466 struct mv_dqh *dqh;
467 u16 max = 0;
468 u32 bit_pos, epctrlx, direction;
469 unsigned char zlt = 0, ios = 0, mult = 0;
27cec2b2 470 unsigned long flags;
e7cddda4 471
472 ep = container_of(_ep, struct mv_ep, ep);
473 udc = ep->udc;
474
e0f4f9d4 475 if (!_ep || !desc
e7cddda4 476 || desc->bDescriptorType != USB_DT_ENDPOINT)
477 return -EINVAL;
478
479 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
480 return -ESHUTDOWN;
481
482 direction = ep_dir(ep);
29cc8897 483 max = usb_endpoint_maxp(desc);
e7cddda4 484
485 /*
486 * disable HW zero length termination select
487 * driver handles zero length packet through req->req.zero
488 */
489 zlt = 1;
490
e7cddda4 491 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
492
493 /* Check if the Endpoint is Primed */
494 if ((readl(&udc->op_regs->epprime) & bit_pos)
495 || (readl(&udc->op_regs->epstatus) & bit_pos)) {
496 dev_info(&udc->dev->dev,
497 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
498 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
499 (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
500 (unsigned)readl(&udc->op_regs->epprime),
501 (unsigned)readl(&udc->op_regs->epstatus),
502 (unsigned)bit_pos);
503 goto en_done;
504 }
505 /* Set the max packet length, interrupt on Setup and Mult fields */
506 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
507 case USB_ENDPOINT_XFER_BULK:
508 zlt = 1;
509 mult = 0;
510 break;
511 case USB_ENDPOINT_XFER_CONTROL:
512 ios = 1;
513 case USB_ENDPOINT_XFER_INT:
514 mult = 0;
515 break;
516 case USB_ENDPOINT_XFER_ISOC:
517 /* Calculate transactions needed for high bandwidth iso */
518 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
bedcff97 519 max = max & 0x7ff; /* bit 0~10 */
e7cddda4 520 /* 3 transactions at most */
521 if (mult > 3)
522 goto en_done;
523 break;
524 default:
525 goto en_done;
526 }
27cec2b2
NZ
527
528 spin_lock_irqsave(&udc->lock, flags);
529 /* Get the endpoint queue head address */
530 dqh = ep->dqh;
e7cddda4 531 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
532 | (mult << EP_QUEUE_HEAD_MULT_POS)
533 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
534 | (ios ? EP_QUEUE_HEAD_IOS : 0);
535 dqh->next_dtd_ptr = 1;
536 dqh->size_ioc_int_sts = 0;
537
538 ep->ep.maxpacket = max;
b1371d16 539 ep->ep.desc = desc;
e7cddda4 540 ep->stopped = 0;
541
542 /* Enable the endpoint for Rx or Tx and set the endpoint type */
543 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
544 if (direction == EP_DIR_IN) {
545 epctrlx &= ~EPCTRL_TX_ALL_MASK;
546 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
547 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
548 << EPCTRL_TX_EP_TYPE_SHIFT);
549 } else {
550 epctrlx &= ~EPCTRL_RX_ALL_MASK;
551 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
552 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
553 << EPCTRL_RX_EP_TYPE_SHIFT);
554 }
555 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
556
557 /*
558 * Implement Guideline (GL# USB-7) The unused endpoint type must
559 * be programmed to bulk.
560 */
561 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
562 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
615268b0 563 epctrlx |= (USB_ENDPOINT_XFER_BULK
e7cddda4 564 << EPCTRL_RX_EP_TYPE_SHIFT);
565 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
566 }
567
568 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
569 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
615268b0 570 epctrlx |= (USB_ENDPOINT_XFER_BULK
e7cddda4 571 << EPCTRL_TX_EP_TYPE_SHIFT);
572 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
573 }
574
27cec2b2
NZ
575 spin_unlock_irqrestore(&udc->lock, flags);
576
e7cddda4 577 return 0;
578en_done:
579 return -EINVAL;
580}
581
/*
 * mv_ep_disable() - shut an endpoint down.
 *
 * Clears the dQH max packet length, disables the endpoint's direction
 * in EPCTRLx, retires every queued request with -ESHUTDOWN via nuke(),
 * and drops the descriptor so the endpoint reads as unconfigured.
 */
static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	/* NOTE(review): bit_pos is computed but never used in this
	 * function */
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
624
625static struct usb_request *
626mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
627{
628 struct mv_req *req = NULL;
629
630 req = kzalloc(sizeof *req, gfp_flags);
631 if (!req)
632 return NULL;
633
634 req->req.dma = DMA_ADDR_INVALID;
635 INIT_LIST_HEAD(&req->queue);
636
637 return &req->req;
638}
639
640static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
641{
642 struct mv_req *req = NULL;
643
644 req = container_of(_req, struct mv_req, req);
645
646 if (_req)
647 kfree(req);
648}
649
/*
 * mv_ep_fifo_flush() - flush an endpoint's primed transfers.
 *
 * Writes the endpoint's bit(s) to ENDPTFLUSH and waits, with bounded
 * retries, until both the flush register and the endpoint status bit
 * clear.  For ep0 both directions are flushed at once.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	/* ep0 flushes RX and TX together; others only their direction */
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
		/* re-flush if the endpoint re-primed meanwhile */
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
709
/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int retval;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->ep.desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware; a pre-set dma address means
	 * the caller already mapped the buffer, so only sync it */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.buf,
					req->req.length, ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					req->req.dma, req->req.length,
					ep_dir(ep)
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
			goto err_unmap_dma;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
		retval = -ENOMEM;
		goto err_unmap_dma;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;

err_unmap_dma:
	/* undo the mapping/sync performed above before reporting failure */
	if (req->mapped) {
		dma_unmap_single(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	} else
		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));

	return retval;
}
801
c2bbd16b
NZ
/*
 * mv_prime_ep() - point the endpoint's dQH at @req's dTD chain and
 * prime the endpoint so hardware starts processing it.
 * Caller is expected to hold udc->lock (writes live hardware state).
 */
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occure before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
822
e7cddda4 823/* dequeues (cancels, unlinks) an I/O request from an endpoint */
824static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
825{
826 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
827 struct mv_req *req;
828 struct mv_udc *udc = ep->udc;
829 unsigned long flags;
830 int stopped, ret = 0;
831 u32 epctrlx;
832
833 if (!_ep || !_req)
834 return -EINVAL;
835
836 spin_lock_irqsave(&ep->udc->lock, flags);
837 stopped = ep->stopped;
838
839 /* Stop the ep before we deal with the queue */
840 ep->stopped = 1;
841 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
842 if (ep_dir(ep) == EP_DIR_IN)
843 epctrlx &= ~EPCTRL_TX_ENABLE;
844 else
845 epctrlx &= ~EPCTRL_RX_ENABLE;
846 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
847
848 /* make sure it's actually queued on this endpoint */
849 list_for_each_entry(req, &ep->queue, queue) {
850 if (&req->req == _req)
851 break;
852 }
853 if (&req->req != _req) {
854 ret = -EINVAL;
855 goto out;
856 }
857
858 /* The request is in progress, or completed but not dequeued */
859 if (ep->queue.next == &req->queue) {
860 _req->status = -ECONNRESET;
861 mv_ep_fifo_flush(_ep); /* flush current transfer */
862
863 /* The request isn't the last request in this ep queue */
864 if (req->queue.next != &ep->queue) {
e7cddda4 865 struct mv_req *next_req;
866
c2bbd16b
NZ
867 next_req = list_entry(req->queue.next,
868 struct mv_req, queue);
e7cddda4 869
870 /* Point the QH to the first TD of next request */
c2bbd16b 871 mv_prime_ep(ep, next_req);
e7cddda4 872 } else {
873 struct mv_dqh *qh;
874
875 qh = ep->dqh;
876 qh->next_dtd_ptr = 1;
877 qh->size_ioc_int_sts = 0;
878 }
879
880 /* The request hasn't been processed, patch up the TD chain */
881 } else {
882 struct mv_req *prev_req;
883
884 prev_req = list_entry(req->queue.prev, struct mv_req, queue);
885 writel(readl(&req->tail->dtd_next),
886 &prev_req->tail->dtd_next);
887
888 }
889
890 done(ep, req, -ECONNRESET);
891
892 /* Enable EP */
893out:
894 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
895 if (ep_dir(ep) == EP_DIR_IN)
896 epctrlx |= EPCTRL_TX_ENABLE;
897 else
898 epctrlx |= EPCTRL_RX_ENABLE;
899 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
900 ep->stopped = stopped;
901
902 spin_unlock_irqrestore(&ep->udc->lock, flags);
903 return ret;
904}
905
906static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
907{
908 u32 epctrlx;
909
910 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
911
912 if (stall) {
913 if (direction == EP_DIR_IN)
914 epctrlx |= EPCTRL_TX_EP_STALL;
915 else
916 epctrlx |= EPCTRL_RX_EP_STALL;
917 } else {
918 if (direction == EP_DIR_IN) {
919 epctrlx &= ~EPCTRL_TX_EP_STALL;
920 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
921 } else {
922 epctrlx &= ~EPCTRL_RX_EP_STALL;
923 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
924 }
925 }
926 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
927}
928
929static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
930{
931 u32 epctrlx;
932
933 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
934
935 if (direction == EP_DIR_OUT)
936 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
937 else
938 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
939}
940
941static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
942{
943 struct mv_ep *ep;
944 unsigned long flags = 0;
945 int status = 0;
946 struct mv_udc *udc;
947
948 ep = container_of(_ep, struct mv_ep, ep);
949 udc = ep->udc;
b1371d16 950 if (!_ep || !ep->ep.desc) {
e7cddda4 951 status = -EINVAL;
952 goto out;
953 }
954
b1371d16 955 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
e7cddda4 956 status = -EOPNOTSUPP;
957 goto out;
958 }
959
960 /*
961 * Attempt to halt IN ep will fail if any transfer requests
962 * are still queue
963 */
964 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
965 status = -EAGAIN;
966 goto out;
967 }
968
969 spin_lock_irqsave(&ep->udc->lock, flags);
970 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
971 if (halt && wedge)
972 ep->wedge = 1;
973 else if (!halt)
974 ep->wedge = 0;
975 spin_unlock_irqrestore(&ep->udc->lock, flags);
976
977 if (ep->ep_num == 0) {
978 udc->ep0_state = WAIT_FOR_SETUP;
979 udc->ep0_dir = EP_DIR_OUT;
980 }
981out:
982 return status;
983}
984
/* Halt or un-halt an endpoint without touching the wedge flag. */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

/* Halt an endpoint and set its wedge flag (see mv_ep_set_halt_wedge). */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
994
/* Endpoint operations table for this driver's usb_ep instances. */
static struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};
1009
dde34cc5
NZ
1010static void udc_clock_enable(struct mv_udc *udc)
1011{
1012 unsigned int i;
1013
1014 for (i = 0; i < udc->clknum; i++)
1015 clk_enable(udc->clk[i]);
1016}
1017
1018static void udc_clock_disable(struct mv_udc *udc)
1019{
1020 unsigned int i;
1021
1022 for (i = 0; i < udc->clknum; i++)
1023 clk_disable(udc->clk[i]);
1024}
1025
e7cddda4 1026static void udc_stop(struct mv_udc *udc)
1027{
1028 u32 tmp;
1029
1030 /* Disable interrupts */
1031 tmp = readl(&udc->op_regs->usbintr);
1032 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
1033 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
1034 writel(tmp, &udc->op_regs->usbintr);
1035
309d6d2b
NZ
1036 udc->stopped = 1;
1037
e7cddda4 1038 /* Reset the Run the bit in the command register to stop VUSB */
1039 tmp = readl(&udc->op_regs->usbcmd);
1040 tmp &= ~USBCMD_RUN_STOP;
1041 writel(tmp, &udc->op_regs->usbcmd);
1042}
1043
/*
 * udc_start() - unmask the device interrupts we handle and start the
 * controller by setting the Run/Stop bit.
 */
static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
1059
1060static int udc_reset(struct mv_udc *udc)
1061{
1062 unsigned int loops;
1063 u32 tmp, portsc;
1064
1065 /* Stop the controller */
1066 tmp = readl(&udc->op_regs->usbcmd);
1067 tmp &= ~USBCMD_RUN_STOP;
1068 writel(tmp, &udc->op_regs->usbcmd);
1069
1070 /* Reset the controller to get default values */
1071 writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1072
1073 /* wait for reset to complete */
1074 loops = LOOPS(RESET_TIMEOUT);
1075 while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1076 if (loops == 0) {
1077 dev_err(&udc->dev->dev,
1078 "Wait for RESET completed TIMEOUT\n");
1079 return -ETIMEDOUT;
1080 }
1081 loops--;
1082 udelay(LOOPS_USEC);
1083 }
1084
1085 /* set controller to device mode */
1086 tmp = readl(&udc->op_regs->usbmode);
1087 tmp |= USBMODE_CTRL_MODE_DEVICE;
1088
1089 /* turn setup lockout off, require setup tripwire in usbcmd */
583a7263 1090 tmp |= USBMODE_SETUP_LOCK_OFF;
e7cddda4 1091
1092 writel(tmp, &udc->op_regs->usbmode);
1093
1094 writel(0x0, &udc->op_regs->epsetupstat);
1095
1096 /* Configure the Endpoint List Address */
1097 writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1098 &udc->op_regs->eplistaddr);
1099
1100 portsc = readl(&udc->op_regs->portsc[0]);
1101 if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1102 portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
1103
1104 if (udc->force_fs)
1105 portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1106 else
1107 portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1108
1109 writel(portsc, &udc->op_regs->portsc[0]);
1110
1111 tmp = readl(&udc->op_regs->epctrlx[0]);
1112 tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1113 writel(tmp, &udc->op_regs->epctrlx[0]);
1114
1115 return 0;
1116}
1117
/*
 * mv_udc_enable_internal() - ungate clocks and initialize the PHY.
 * Idempotent: returns 0 immediately when already active.  On PHY init
 * failure the clocks are disabled again and the error is returned.
 */
static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	udc_clock_enable(udc);
	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}

/* Enable the UDC, but only when clock gating is in use; otherwise the
 * hardware is assumed to stay powered. */
static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

/*
 * mv_udc_disable_internal() - deinitialize the PHY and gate the clocks.
 * No-op when the UDC is not active.
 */
static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

/* Counterpart of mv_udc_enable(): only acts under clock gating. */
static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}
1165
e7cddda4 1166static int mv_udc_get_frame(struct usb_gadget *gadget)
1167{
1168 struct mv_udc *udc;
1169 u16 retval;
1170
1171 if (!gadget)
1172 return -ENODEV;
1173
1174 udc = container_of(gadget, struct mv_udc, gadget);
1175
86bb7028 1176 retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
e7cddda4 1177
1178 return retval;
1179}
1180
1181/* Tries to wake up the host connected to this gadget */
1182static int mv_udc_wakeup(struct usb_gadget *gadget)
1183{
1184 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1185 u32 portsc;
1186
1187 /* Remote wakeup feature not enabled by host */
1188 if (!udc->remote_wakeup)
1189 return -ENOTSUPP;
1190
1191 portsc = readl(&udc->op_regs->portsc);
1192 /* not suspended? */
1193 if (!(portsc & PORTSCX_PORT_SUSPEND))
1194 return 0;
1195 /* trigger force resume */
1196 portsc |= PORTSCX_PORT_FORCE_RESUME;
1197 writel(portsc, &udc->op_regs->portsc[0]);
1198 return 0;
1199}
1200
1aec033b
NZ
/*
 * usb_gadget_ops.vbus_session: invoked when VBUS appears or
 * disappears.  With a bound driver, softconnect requested, and VBUS
 * present, the controller is powered up and fully re-initialized
 * (register state is lost while clock-gated); on VBUS loss all
 * queued transfers are flushed and the controller is powered down.
 */
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1233
/*
 * usb_gadget_ops.pullup: software connect/disconnect on D+.  The
 * structure mirrors mv_udc_vbus_session(): the controller is only
 * brought up when a driver is bound, softconnect is requested, and
 * VBUS is present; a disconnect flushes all activity and powers the
 * block back down.
 */
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1266
0f91349b
SAS
/* forward declarations: the implementations follow the ops table */
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int mv_udc_stop(struct usb_gadget_driver *driver);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame = mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup = mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session = mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup = mv_udc_pullup,
	.start = mv_udc_start,
	.stop = mv_udc_stop,
};
1287
/*
 * Build the software endpoint table.  eps[0] is the bidirectional
 * control endpoint ep0; for the others, even indices are the OUT
 * direction and odd indices IN of hardware endpoint i/2.  Index 1 is
 * intentionally skipped since ep0 covers both directions.  Always
 * returns 0.
 */
static int eps_init(struct mv_udc *udc)
{
	struct mv_ep *ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
	ep->ep_num = 0;
	ep->ep.desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		/* placeholder max; presumably tightened when the ep is
		 * enabled from its descriptor — confirm in ep_enable */
		ep->ep.maxpacket = (unsigned short) ~0;
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		/* each ep owns its queue head in the shared dQH array */
		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}
1336
1337/* delete all endpoint requests, called with spinlock held */
1338static void nuke(struct mv_ep *ep, int status)
1339{
1340 /* called with spinlock held */
1341 ep->stopped = 1;
1342
1343 /* endpoint fifo flush */
1344 mv_ep_fifo_flush(&ep->ep);
1345
1346 while (!list_empty(&ep->queue)) {
1347 struct mv_req *req = NULL;
1348 req = list_entry(ep->queue.next, struct mv_req, queue);
1349 done(ep, req, status);
1350 }
1351}
1352
/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep *ep;

	/* flush ep0 first, then every endpoint on the gadget list */
	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		/* drop the udc lock across the driver callback */
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
1371
/*
 * usb_gadget_ops.start: bind a gadget driver to this controller.
 * Hooks the driver up under the lock, calls its bind() callback
 * unlocked, registers as an OTG peripheral when a transceiver is
 * present, then enables the pullup and kicks the vbus worker so a
 * cable already attached at boot is noticed.  Returns 0 or a
 * negative errno; driver pointers are rolled back on failure.
 */
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct mv_udc *udc = the_controller;
	int retval = 0;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	retval = bind(&udc->gadget);
	if (retval) {
		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
				driver->driver.name, retval);
		/* roll back the hookup done above */
		udc->driver = NULL;
		udc->gadget.dev.driver = NULL;
		return retval;
	}

	if (!IS_ERR_OR_NULL(udc->transceiver)) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			/* undo the successful bind before bailing out */
			if (driver->unbind) {
				driver->unbind(&udc->gadget);
				udc->gadget.dev.driver = NULL;
				udc->driver = NULL;
			}
			return retval;
		}
	}

	/* pullup is always on */
	mv_udc_pullup(&udc->gadget, 1);

	/* When boot with cable attached, there will be no vbus irq occurred */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}
e7cddda4 1431
/*
 * usb_gadget_ops.stop: unbind the current gadget driver.  The
 * controller is briefly re-enabled so udc_stop() can access its
 * registers even when clock-gated, all activity is flushed, and the
 * driver's unbind() is invoked after the lock is released.
 */
static int mv_udc_stop(struct usb_gadget_driver *driver)
{
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	spin_lock_irqsave(&udc->lock, flags);

	/* power up just long enough to quiesce the hardware */
	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	return 0;
}
e7cddda4 1459
fb22cbac
NZ
1460static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1461{
1462 u32 portsc;
1463
1464 portsc = readl(&udc->op_regs->portsc[0]);
1465 portsc |= mode << 16;
1466 writel(portsc, &udc->op_regs->portsc[0]);
1467}
1468
1469static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1470{
1471 struct mv_udc *udc = the_controller;
1472 struct mv_req *req = container_of(_req, struct mv_req, req);
1473 unsigned long flags;
1474
1475 dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1476
1477 spin_lock_irqsave(&udc->lock, flags);
1478 if (req->test_mode) {
1479 mv_set_ptc(udc, req->test_mode);
1480 req->test_mode = 0;
1481 }
1482 spin_unlock_irqrestore(&udc->lock, flags);
1483}
1484
/*
 * Queue an ep0 status/handshake response in the given direction.
 * For GET_STATUS replies (empty == false) the two status bytes are
 * written into the preallocated status_req buffer; otherwise a
 * zero-length packet is primed.  When a test mode is pending, the
 * completion hook that flips the PTC bits is attached.  Returns 0 or
 * a negative errno; the DMA mapping is undone on failure.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* defer the PTC write to the status-stage completion */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	/* map the buffer for DMA unless it is already mapped */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	/* failure: release the DMA mapping taken above */
	if (req->mapped) {
		dma_unmap_single(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	return retval;
}
1553
fb22cbac
NZ
1554static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1555{
1556 if (index <= TEST_FORCE_EN) {
1557 udc->test_mode = index;
1558 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1559 ep0_stall(udc);
1560 } else
1561 dev_err(&udc->dev->dev,
1562 "This test mode(%d) is not supported\n", index);
1563}
1564
e7cddda4 1565static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1566{
1567 udc->dev_addr = (u8)setup->wValue;
1568
1569 /* update usb state */
1570 udc->usb_state = USB_STATE_ADDRESS;
1571
1572 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1573 ep0_stall(udc);
1574}
1575
1576static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1577 struct usb_ctrlrequest *setup)
1578{
431879a7 1579 u16 status = 0;
e7cddda4 1580 int retval;
1581
1582 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1583 != (USB_DIR_IN | USB_TYPE_STANDARD))
1584 return;
1585
1586 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1587 status = 1 << USB_DEVICE_SELF_POWERED;
1588 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1589 } else if ((setup->bRequestType & USB_RECIP_MASK)
1590 == USB_RECIP_INTERFACE) {
1591 /* get interface status */
1592 status = 0;
1593 } else if ((setup->bRequestType & USB_RECIP_MASK)
1594 == USB_RECIP_ENDPOINT) {
1595 u8 ep_num, direction;
1596
1597 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1598 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1599 ? EP_DIR_IN : EP_DIR_OUT;
1600 status = ep_is_stall(udc, ep_num, direction)
1601 << USB_ENDPOINT_HALT;
1602 }
1603
1604 retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1605 if (retval)
1606 ep0_stall(udc);
36616224
NZ
1607 else
1608 udc->ep0_state = DATA_STATE_XMIT;
e7cddda4 1609}
1610
/*
 * CLEAR_FEATURE: supports DEVICE_REMOTE_WAKEUP on the device
 * recipient and ENDPOINT_HALT on endpoints (a wedged endpoint keeps
 * its halt, per the wedge semantics).  A handled request is
 * acknowledged with a zero-length IN status; unknown features are
 * silently ignored.
 */
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			/* wedged endpoints stay halted but still ACK */
			if (ep->wedge == 1)
				break;
			/* drop the udc lock across ep_set_stall */
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1654
/*
 * SET_FEATURE: handles DEVICE_REMOTE_WAKEUP and TEST_MODE on the
 * device recipient and ENDPOINT_HALT on endpoints; everything else
 * is ignored.  Success is acknowledged with a zero-length IN status
 * (test mode primes its own status stage in mv_udc_testmode).
 */
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			/*
			 * NOTE(review): ep0_stall() is not followed by a
			 * return, so mv_udc_testmode() still runs on the
			 * invalid-request paths below — confirm this is
			 * intended.
			 */
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			/* test mode selector is in the high byte of wIndex */
			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			/* drop the udc lock across ep_set_stall */
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1706
/*
 * Dispatch a SETUP packet received on endpoint ep_num.  A few
 * standard chapter-9 requests are handled inside the driver;
 * everything else is delegated to the gadget driver's setup()
 * callback (invoked with the udc lock dropped), after which the ep0
 * state machine is advanced based on the presence and direction of
 * the data stage.
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	/* discard anything still queued on the OUT side of this control ep */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1768
/* complete DATA or STATUS phase of ep0; prime status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address (deferred from SET_ADDRESS) */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	/* advance the ep0 state machine */
	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}
1806
/*
 * Copy the 8-byte SETUP packet out of the endpoint's dQH into
 * buffer_ptr.  The setup-tripwire protocol protects against a new
 * setup packet arriving mid-copy: the copy is retried until the
 * tripwire bit is still set afterwards, proving the buffer was
 * stable for the whole memcpy.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1831
/*
 * Transaction-complete interrupt.  Setup packets are drained first
 * (they must be read ASAP); then for every endpoint flagged in
 * ENDPTCOMPLETE the request queue is walked, completing finished
 * requests up to the first still-in-flight one.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep *curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		/* OUT completions sit in bits 0..15, IN in bits 16..31 */
		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* slot 1 (ep0 IN) shares the eps[0] control endpoint */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
1904
1905void irq_process_reset(struct mv_udc *udc)
1906{
1907 u32 tmp;
1908 unsigned int loops;
1909
1910 udc->ep0_dir = EP_DIR_OUT;
1911 udc->ep0_state = WAIT_FOR_SETUP;
1912 udc->remote_wakeup = 0; /* default to 0 on reset */
1913
1914 /* The address bits are past bit 25-31. Set the address */
1915 tmp = readl(&udc->op_regs->deviceaddr);
1916 tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1917 writel(tmp, &udc->op_regs->deviceaddr);
1918
1919 /* Clear all the setup token semaphores */
1920 tmp = readl(&udc->op_regs->epsetupstat);
1921 writel(tmp, &udc->op_regs->epsetupstat);
1922
1923 /* Clear all the endpoint complete status bits */
1924 tmp = readl(&udc->op_regs->epcomplete);
1925 writel(tmp, &udc->op_regs->epcomplete);
1926
1927 /* wait until all endptprime bits cleared */
1928 loops = LOOPS(PRIME_TIMEOUT);
1929 while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1930 if (loops == 0) {
1931 dev_err(&udc->dev->dev,
1932 "Timeout for ENDPTPRIME = 0x%x\n",
1933 readl(&udc->op_regs->epprime));
1934 break;
1935 }
1936 loops--;
1937 udelay(LOOPS_USEC);
1938 }
1939
1940 /* Write 1s to the Flush register */
1941 writel((u32)~0, &udc->op_regs->epflush);
1942
1943 if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1944 dev_info(&udc->dev->dev, "usb bus reset\n");
1945 udc->usb_state = USB_STATE_DEFAULT;
1946 /* reset all the queues, stop all USB activities */
1947 stop_activity(udc, udc->driver);
1948 } else {
1949 dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1950 readl(&udc->op_regs->portsc));
1951
1952 /*
1953 * re-initialize
1954 * controller reset
1955 */
1956 udc_reset(udc);
1957
1958 /* reset all the queues, stop all USB activities */
1959 stop_activity(udc, udc->driver);
1960
1961 /* reset ep0 dQH and endptctrl */
1962 ep0_reset(udc);
1963
1964 /* enable interrupt and set controller to run state */
1965 udc_start(udc);
1966
1967 udc->usb_state = USB_STATE_ATTACHED;
1968 }
1969}
1970
1971static void handle_bus_resume(struct mv_udc *udc)
1972{
1973 udc->usb_state = udc->resume_state;
1974 udc->resume_state = 0;
1975
1976 /* report resume to the driver */
1977 if (udc->driver) {
1978 if (udc->driver->resume) {
1979 spin_unlock(&udc->lock);
1980 udc->driver->resume(&udc->gadget);
1981 spin_lock(&udc->lock);
1982 }
1983 }
1984}
1985
1986static void irq_process_suspend(struct mv_udc *udc)
1987{
1988 udc->resume_state = udc->usb_state;
1989 udc->usb_state = USB_STATE_SUSPENDED;
1990
1991 if (udc->driver->suspend) {
1992 spin_unlock(&udc->lock);
1993 udc->driver->suspend(&udc->gadget);
1994 spin_lock(&udc->lock);
1995 }
1996}
1997
/*
 * Port-change interrupt: latch the negotiated connection speed once
 * reset has finished, and derive suspend/resume transitions from the
 * PORTSCX suspend bit, forwarding them to the gadget driver.
 */
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		/* notify the driver with the lock dropped */
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* port no longer suspended while we thought it was: resume */
	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}
2040
/* Hardware error interrupt: just account for it here. */
static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
2046
/*
 * Main controller interrupt handler.  Reads USBSTS masked by the
 * enabled interrupt sources, acknowledges everything in one write,
 * then fans out to the per-cause helpers under the udc lock.
 * Returns IRQ_NONE for spurious interrupts or when the controller is
 * stopped, so a shared interrupt line keeps working.
 */
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	/* only consider sources that are actually enabled */
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
2089
1aec033b
NZ
2090static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2091{
2092 struct mv_udc *udc = (struct mv_udc *)dev;
2093
2094 /* polling VBUS and init phy may cause too much time*/
2095 if (udc->qwork)
2096 queue_work(udc->qwork, &udc->vbus_work);
2097
2098 return IRQ_HANDLED;
2099}
2100
2101static void mv_udc_vbus_work(struct work_struct *work)
2102{
2103 struct mv_udc *udc;
2104 unsigned int vbus;
2105
2106 udc = container_of(work, struct mv_udc, vbus_work);
2107 if (!udc->pdata->vbus)
2108 return;
2109
2110 vbus = udc->pdata->vbus->poll();
2111 dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2112
2113 if (vbus == VBUS_HIGH)
2114 mv_udc_vbus_session(&udc->gadget, 1);
2115 else if (vbus == VBUS_LOW)
2116 mv_udc_vbus_session(&udc->gadget, 0);
2117}
2118
e7cddda4 2119/* release device structure */
2120static void gadget_release(struct device *_dev)
2121{
2122 struct mv_udc *udc = the_controller;
2123
2124 complete(udc->done);
e7cddda4 2125}
2126
5d0b8d0f 2127static int __devexit mv_udc_remove(struct platform_device *dev)
e7cddda4 2128{
2129 struct mv_udc *udc = the_controller;
dde34cc5 2130 int clk_i;
e7cddda4 2131
0f91349b
SAS
2132 usb_del_gadget_udc(&udc->gadget);
2133
1aec033b
NZ
2134 if (udc->qwork) {
2135 flush_workqueue(udc->qwork);
2136 destroy_workqueue(udc->qwork);
2137 }
2138
487d54d1
NZ
2139 /*
2140 * If we have transceiver inited,
2141 * then vbus irq will not be requested in udc driver.
2142 */
2143 if (udc->pdata && udc->pdata->vbus
ded017ee 2144 && udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
1aec033b 2145 free_irq(udc->pdata->vbus->irq, &dev->dev);
e7cddda4 2146
2147 /* free memory allocated in probe */
2148 if (udc->dtd_pool)
2149 dma_pool_destroy(udc->dtd_pool);
2150
2151 if (udc->ep_dqh)
2152 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2153 udc->ep_dqh, udc->ep_dqh_dma);
2154
2155 kfree(udc->eps);
2156
2157 if (udc->irq)
2158 free_irq(udc->irq, &dev->dev);
2159
1aec033b
NZ
2160 mv_udc_disable(udc);
2161
e7cddda4 2162 if (udc->cap_regs)
2163 iounmap(udc->cap_regs);
e7cddda4 2164
2165 if (udc->phy_regs)
5e6c86b0 2166 iounmap(udc->phy_regs);
e7cddda4 2167
2168 if (udc->status_req) {
2169 kfree(udc->status_req->req.buf);
2170 kfree(udc->status_req);
2171 }
2172
dde34cc5
NZ
2173 for (clk_i = 0; clk_i <= udc->clknum; clk_i++)
2174 clk_put(udc->clk[clk_i]);
2175
e7cddda4 2176 device_unregister(&udc->gadget.dev);
2177
2178 /* free dev, wait for the release() finished */
dde34cc5
NZ
2179 wait_for_completion(udc->done);
2180 kfree(udc);
e7cddda4 2181
2182 the_controller = NULL;
2183
2184 return 0;
2185}
2186
5d0b8d0f 2187static int __devinit mv_udc_probe(struct platform_device *dev)
e7cddda4 2188{
dde34cc5 2189 struct mv_usb_platform_data *pdata = dev->dev.platform_data;
e7cddda4 2190 struct mv_udc *udc;
2191 int retval = 0;
dde34cc5 2192 int clk_i = 0;
e7cddda4 2193 struct resource *r;
2194 size_t size;
2195
dde34cc5
NZ
2196 if (pdata == NULL) {
2197 dev_err(&dev->dev, "missing platform_data\n");
2198 return -ENODEV;
2199 }
2200
2201 size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
2202 udc = kzalloc(size, GFP_KERNEL);
e7cddda4 2203 if (udc == NULL) {
2204 dev_err(&dev->dev, "failed to allocate memory for udc\n");
dde34cc5 2205 return -ENOMEM;
e7cddda4 2206 }
2207
dde34cc5
NZ
2208 the_controller = udc;
2209 udc->done = &release_done;
2210 udc->pdata = dev->dev.platform_data;
e7cddda4 2211 spin_lock_init(&udc->lock);
2212
2213 udc->dev = dev;
2214
487d54d1
NZ
2215#ifdef CONFIG_USB_OTG_UTILS
2216 if (pdata->mode == MV_USB_MODE_OTG)
662dca54 2217 udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
487d54d1
NZ
2218#endif
2219
dde34cc5
NZ
2220 udc->clknum = pdata->clknum;
2221 for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
2222 udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
2223 if (IS_ERR(udc->clk[clk_i])) {
2224 retval = PTR_ERR(udc->clk[clk_i]);
2225 goto err_put_clk;
2226 }
e7cddda4 2227 }
2228
dde34cc5 2229 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
e7cddda4 2230 if (r == NULL) {
2231 dev_err(&dev->dev, "no I/O memory resource defined\n");
2232 retval = -ENODEV;
dde34cc5 2233 goto err_put_clk;
e7cddda4 2234 }
2235
2236 udc->cap_regs = (struct mv_cap_regs __iomem *)
2237 ioremap(r->start, resource_size(r));
2238 if (udc->cap_regs == NULL) {
2239 dev_err(&dev->dev, "failed to map I/O memory\n");
2240 retval = -EBUSY;
dde34cc5 2241 goto err_put_clk;
e7cddda4 2242 }
2243
dde34cc5 2244 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
e7cddda4 2245 if (r == NULL) {
2246 dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2247 retval = -ENODEV;
dde34cc5 2248 goto err_iounmap_capreg;
e7cddda4 2249 }
2250
5e6c86b0
NZ
2251 udc->phy_regs = ioremap(r->start, resource_size(r));
2252 if (udc->phy_regs == NULL) {
e7cddda4 2253 dev_err(&dev->dev, "failed to map phy I/O memory\n");
2254 retval = -EBUSY;
dde34cc5 2255 goto err_iounmap_capreg;
e7cddda4 2256 }
2257
2258 /* we will acces controller register, so enable the clk */
85ff7bfb
NZ
2259 retval = mv_udc_enable_internal(udc);
2260 if (retval)
2261 goto err_iounmap_phyreg;
e7cddda4 2262
5e6c86b0
NZ
2263 udc->op_regs =
2264 (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
e7cddda4 2265 + (readl(&udc->cap_regs->caplength_hciversion)
2266 & CAPLENGTH_MASK));
2267 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2268
4540a9ab
NZ
2269 /*
2270 * some platform will use usb to download image, it may not disconnect
2271 * usb gadget before loading kernel. So first stop udc here.
2272 */
2273 udc_stop(udc);
2274 writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2275
e7cddda4 2276 size = udc->max_eps * sizeof(struct mv_dqh) *2;
2277 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2278 udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2279 &udc->ep_dqh_dma, GFP_KERNEL);
2280
2281 if (udc->ep_dqh == NULL) {
2282 dev_err(&dev->dev, "allocate dQH memory failed\n");
2283 retval = -ENOMEM;
dde34cc5 2284 goto err_disable_clock;
e7cddda4 2285 }
2286 udc->ep_dqh_size = size;
2287
2288 /* create dTD dma_pool resource */
2289 udc->dtd_pool = dma_pool_create("mv_dtd",
2290 &dev->dev,
2291 sizeof(struct mv_dtd),
2292 DTD_ALIGNMENT,
2293 DMA_BOUNDARY);
2294
2295 if (!udc->dtd_pool) {
2296 retval = -ENOMEM;
dde34cc5 2297 goto err_free_dma;
e7cddda4 2298 }
2299
2300 size = udc->max_eps * sizeof(struct mv_ep) *2;
2301 udc->eps = kzalloc(size, GFP_KERNEL);
2302 if (udc->eps == NULL) {
2303 dev_err(&dev->dev, "allocate ep memory failed\n");
2304 retval = -ENOMEM;
dde34cc5 2305 goto err_destroy_dma;
e7cddda4 2306 }
2307
2308 /* initialize ep0 status request structure */
2309 udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2310 if (!udc->status_req) {
2311 dev_err(&dev->dev, "allocate status_req memory failed\n");
2312 retval = -ENOMEM;
dde34cc5 2313 goto err_free_eps;
e7cddda4 2314 }
2315 INIT_LIST_HEAD(&udc->status_req->queue);
2316
2317 /* allocate a small amount of memory to get valid address */
2318 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
46e172df 2319 udc->status_req->req.dma = DMA_ADDR_INVALID;
e7cddda4 2320
2321 udc->resume_state = USB_STATE_NOTATTACHED;
2322 udc->usb_state = USB_STATE_POWERED;
2323 udc->ep0_dir = EP_DIR_OUT;
2324 udc->remote_wakeup = 0;
2325
2326 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2327 if (r == NULL) {
2328 dev_err(&dev->dev, "no IRQ resource defined\n");
2329 retval = -ENODEV;
dde34cc5 2330 goto err_free_status_req;
e7cddda4 2331 }
2332 udc->irq = r->start;
2333 if (request_irq(udc->irq, mv_udc_irq,
b5dd18d8 2334 IRQF_SHARED, driver_name, udc)) {
e7cddda4 2335 dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2336 udc->irq);
2337 retval = -ENODEV;
dde34cc5 2338 goto err_free_status_req;
e7cddda4 2339 }
2340
2341 /* initialize gadget structure */
2342 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
2343 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
2344 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
2345 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
d327ab5b 2346 udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
e7cddda4 2347
2348 /* the "gadget" abstracts/virtualizes the controller */
2349 dev_set_name(&udc->gadget.dev, "gadget");
2350 udc->gadget.dev.parent = &dev->dev;
2351 udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2352 udc->gadget.dev.release = gadget_release;
2353 udc->gadget.name = driver_name; /* gadget name */
2354
2355 retval = device_register(&udc->gadget.dev);
2356 if (retval)
dde34cc5 2357 goto err_free_irq;
e7cddda4 2358
2359 eps_init(udc);
2360
1aec033b 2361 /* VBUS detect: we can disable/enable clock on demand.*/
ded017ee 2362 if (!IS_ERR_OR_NULL(udc->transceiver))
487d54d1
NZ
2363 udc->clock_gating = 1;
2364 else if (pdata->vbus) {
1aec033b
NZ
2365 udc->clock_gating = 1;
2366 retval = request_threaded_irq(pdata->vbus->irq, NULL,
2367 mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2368 if (retval) {
2369 dev_info(&dev->dev,
2370 "Can not request irq for VBUS, "
2371 "disable clock gating\n");
2372 udc->clock_gating = 0;
2373 }
2374
2375 udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2376 if (!udc->qwork) {
2377 dev_err(&dev->dev, "cannot create workqueue\n");
2378 retval = -ENOMEM;
2379 goto err_unregister;
2380 }
2381
2382 INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2383 }
2384
2385 /*
2386 * When clock gating is supported, we can disable clk and phy.
2387 * If not, it means that VBUS detection is not supported, we
2388 * have to enable vbus active all the time to let controller work.
2389 */
85ff7bfb
NZ
2390 if (udc->clock_gating)
2391 mv_udc_disable_internal(udc);
2392 else
1aec033b 2393 udc->vbus_active = 1;
e7cddda4 2394
0f91349b 2395 retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
dde34cc5
NZ
2396 if (retval)
2397 goto err_unregister;
2398
1aec033b
NZ
2399 dev_info(&dev->dev, "successful probe UDC device %s clock gating.\n",
2400 udc->clock_gating ? "with" : "without");
2401
dde34cc5
NZ
2402 return 0;
2403
2404err_unregister:
487d54d1 2405 if (udc->pdata && udc->pdata->vbus
ded017ee 2406 && udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
1aec033b 2407 free_irq(pdata->vbus->irq, &dev->dev);
dde34cc5
NZ
2408 device_unregister(&udc->gadget.dev);
2409err_free_irq:
2410 free_irq(udc->irq, &dev->dev);
2411err_free_status_req:
2412 kfree(udc->status_req->req.buf);
2413 kfree(udc->status_req);
2414err_free_eps:
2415 kfree(udc->eps);
2416err_destroy_dma:
2417 dma_pool_destroy(udc->dtd_pool);
2418err_free_dma:
2419 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2420 udc->ep_dqh, udc->ep_dqh_dma);
2421err_disable_clock:
85ff7bfb 2422 mv_udc_disable_internal(udc);
dde34cc5 2423err_iounmap_phyreg:
5e6c86b0 2424 iounmap(udc->phy_regs);
dde34cc5
NZ
2425err_iounmap_capreg:
2426 iounmap(udc->cap_regs);
2427err_put_clk:
2428 for (clk_i--; clk_i >= 0; clk_i--)
2429 clk_put(udc->clk[clk_i]);
2430 the_controller = NULL;
2431 kfree(udc);
e7cddda4 2432 return retval;
2433}
2434
2435#ifdef CONFIG_PM
cb424473 2436static int mv_udc_suspend(struct device *_dev)
e7cddda4 2437{
2438 struct mv_udc *udc = the_controller;
2439
5076ae55 2440 /* if OTG is enabled, the following will be done in OTG driver*/
ded017ee 2441 if (!IS_ERR_OR_NULL(udc->transceiver))
5076ae55
NZ
2442 return 0;
2443
2444 if (udc->pdata->vbus && udc->pdata->vbus->poll)
2445 if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2446 dev_info(&udc->dev->dev, "USB cable is connected!\n");
2447 return -EAGAIN;
2448 }
2449
2450 /*
2451 * only cable is unplugged, udc can suspend.
2452 * So do not care about clock_gating == 1.
2453 */
2454 if (!udc->clock_gating) {
2455 udc_stop(udc);
2456
2457 spin_lock_irq(&udc->lock);
2458 /* stop all usb activities */
2459 stop_activity(udc, udc->driver);
2460 spin_unlock_irq(&udc->lock);
2461
2462 mv_udc_disable_internal(udc);
2463 }
e7cddda4 2464
2465 return 0;
2466}
2467
cb424473 2468static int mv_udc_resume(struct device *_dev)
e7cddda4 2469{
2470 struct mv_udc *udc = the_controller;
2471 int retval;
2472
5076ae55 2473 /* if OTG is enabled, the following will be done in OTG driver*/
ded017ee 2474 if (!IS_ERR_OR_NULL(udc->transceiver))
5076ae55
NZ
2475 return 0;
2476
2477 if (!udc->clock_gating) {
2478 retval = mv_udc_enable_internal(udc);
2479 if (retval)
dde34cc5 2480 return retval;
5076ae55
NZ
2481
2482 if (udc->driver && udc->softconnect) {
2483 udc_reset(udc);
2484 ep0_reset(udc);
2485 udc_start(udc);
dde34cc5 2486 }
e7cddda4 2487 }
dde34cc5 2488
e7cddda4 2489 return 0;
2490}
2491
/* System sleep callbacks (legacy suspend/resume, no runtime PM). */
static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend = mv_udc_suspend,
	.resume = mv_udc_resume,
};
2496#endif
2497
046b07ac
NZ
2498static void mv_udc_shutdown(struct platform_device *dev)
2499{
2500 struct mv_udc *udc = the_controller;
2501 u32 mode;
2502
2503 /* reset controller mode to IDLE */
2504 mode = readl(&udc->op_regs->usbmode);
2505 mode &= ~3;
2506 writel(mode, &udc->op_regs->usbmode);
2507}
2508
e7cddda4 2509static struct platform_driver udc_driver = {
2510 .probe = mv_udc_probe,
2511 .remove = __exit_p(mv_udc_remove),
046b07ac 2512 .shutdown = mv_udc_shutdown,
e7cddda4 2513 .driver = {
2514 .owner = THIS_MODULE,
5e6c86b0 2515 .name = "mv-udc",
e7cddda4 2516#ifdef CONFIG_PM
cb424473 2517 .pm = &mv_udc_pm_ops,
e7cddda4 2518#endif
2519 },
2520};
cc27c96c
AL
2521
/* Generates module init/exit that register/unregister udc_driver. */
module_platform_driver(udc_driver);

MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
This page took 0.320098 seconds and 5 git commands to generate.