1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if wrong Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
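/*
 * Illustrative usage (call path outside this file, stated as an assumption):
 * when the host issues SET_FEATURE(TEST_MODE) with, say, the TEST_PACKET
 * selector, the ep0 handling code is expected to call
 * dwc3_gadget_set_test_mode(dwc, TEST_PACKET) once the control transfer's
 * status stage completes, which programs the selector into DCTL.TstCtl.
 */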
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function returns the
76 * current link state read from the DSTS register.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
147
148 /**
149 * dwc3_ep_inc_trb() - Increment a TRB index.
150 * @index: Pointer to the TRB index to increment.
151 *
152 * The index should never point to the link TRB. After incrementing,
153 * if it points to the link TRB, wrap around to the beginning. The
154 * link TRB is always at the last TRB entry.
155 */
156 static void dwc3_ep_inc_trb(u8 *index)
157 {
158 (*index)++;
159 if (*index == (DWC3_TRB_NUM - 1))
160 *index = 0;
161 }
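/*
 * Worked example (assuming DWC3_TRB_NUM is 256): valid transfer slots are
 * 0..254 and slot 255 holds the link TRB, so incrementing an index of 254
 * yields 255, which is immediately wrapped back to 0; the link TRB slot is
 * therefore never used for a normal transfer.
 */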
162
163 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
164 {
165 dwc3_ep_inc_trb(&dep->trb_enqueue);
166 }
167
168 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
169 {
170 dwc3_ep_inc_trb(&dep->trb_dequeue);
171 }
172
173 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
174 int status)
175 {
176 struct dwc3 *dwc = dep->dwc;
177 int i;
178
179 if (req->started) {
180 i = 0;
181 do {
182 dwc3_ep_inc_deq(dep);
183 } while (++i < req->request.num_mapped_sgs);
184 req->started = false;
185 }
186 list_del(&req->list);
187 req->trb = NULL;
188
189 if (req->request.status == -EINPROGRESS)
190 req->request.status = status;
191
192 if (dwc->ep0_bounced && dep->number == 0)
193 dwc->ep0_bounced = false;
194 else
195 usb_gadget_unmap_request(&dwc->gadget, &req->request,
196 req->direction);
197
198 trace_dwc3_gadget_giveback(req);
199
200 spin_unlock(&dwc->lock);
201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
202 spin_lock(&dwc->lock);
203
204 if (dep->number > 1)
205 pm_runtime_put(dwc->dev);
206 }
207
208 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
209 {
210 u32 timeout = 500;
211 int status = 0;
212 int ret = 0;
213 u32 reg;
214
215 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
216 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
217
218 do {
219 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
220 if (!(reg & DWC3_DGCMD_CMDACT)) {
221 status = DWC3_DGCMD_STATUS(reg);
222 if (status)
223 ret = -EINVAL;
224 break;
225 }
226 } while (timeout--);
227
228 if (!timeout) {
229 ret = -ETIMEDOUT;
230 status = -ETIMEDOUT;
231 }
232
233 trace_dwc3_gadget_generic_cmd(cmd, param, status);
234
235 return ret;
236 }
237
238 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
239
240 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
241 struct dwc3_gadget_ep_cmd_params *params)
242 {
243 struct dwc3 *dwc = dep->dwc;
244 u32 timeout = 500;
245 u32 reg;
246
247 int cmd_status = 0;
248 int susphy = false;
249 int ret = -EINVAL;
250
251 /*
252 * Synopsys Databook 2.60a states, in section 6.3.2.5.[1-8], that
253 * before issuing an endpoint command we must check whether the
254 * GUSB2PHYCFG.SUSPHY bit is set and, if it is, clear it.
255 *
256 * We must also restore the SUSPHY bit to its original value before
257 * returning, as stated in the same section of the databook.
258 */
259 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
260 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
261 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
262 susphy = true;
263 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
264 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
265 }
266 }
267
268 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
269 int needs_wakeup;
270
271 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
272 dwc->link_state == DWC3_LINK_STATE_U2 ||
273 dwc->link_state == DWC3_LINK_STATE_U3);
274
275 if (unlikely(needs_wakeup)) {
276 ret = __dwc3_gadget_wakeup(dwc);
277 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
278 ret);
279 }
280 }
281
282 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
283 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
284 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
285
286 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
287 do {
288 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
289 if (!(reg & DWC3_DEPCMD_CMDACT)) {
290 cmd_status = DWC3_DEPCMD_STATUS(reg);
291
292 switch (cmd_status) {
293 case 0:
294 ret = 0;
295 break;
296 case DEPEVT_TRANSFER_NO_RESOURCE:
297 ret = -EINVAL;
298 break;
299 case DEPEVT_TRANSFER_BUS_EXPIRY:
300 /*
301 * SW issues START TRANSFER command to
302 * isochronous ep with future frame interval. If
303 * future interval time has already passed when
304 * core receives the command, it will respond
305 * with an error status of 'Bus Expiry'.
306 *
307 * Instead of always returning -EINVAL, let's
308 * give a hint to the gadget driver that this is
309 * the case by returning -EAGAIN.
310 */
311 ret = -EAGAIN;
312 break;
313 default:
314 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
315 }
316
317 break;
318 }
319 } while (--timeout);
320
321 if (timeout == 0) {
322 ret = -ETIMEDOUT;
323 cmd_status = -ETIMEDOUT;
324 }
325
326 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
327
328 if (unlikely(susphy)) {
329 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
330 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
331 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
332 }
333
334 return ret;
335 }
336
337 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
338 {
339 struct dwc3 *dwc = dep->dwc;
340 struct dwc3_gadget_ep_cmd_params params;
341 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
342
343 /*
344 * As of core revision 2.60a the recommended programming model
345 * is to set the ClearPendIN bit when issuing a Clear Stall EP
346 * command for IN endpoints. This is to prevent an issue where
347 * some (non-compliant) hosts may not send ACK TPs for pending
348 * IN transfers due to a mishandled error condition. Synopsys
349 * STAR 9000614252.
350 */
351 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
352 cmd |= DWC3_DEPCMD_CLEARPENDIN;
353
354 memset(&params, 0, sizeof(params));
355
356 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
357 }
358
359 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
360 struct dwc3_trb *trb)
361 {
362 u32 offset = (char *) trb - (char *) dep->trb_pool;
363
364 return dep->trb_pool_dma + offset;
365 }
366
367 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
368 {
369 struct dwc3 *dwc = dep->dwc;
370
371 if (dep->trb_pool)
372 return 0;
373
374 dep->trb_pool = dma_alloc_coherent(dwc->dev,
375 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
376 &dep->trb_pool_dma, GFP_KERNEL);
377 if (!dep->trb_pool) {
378 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
379 dep->name);
380 return -ENOMEM;
381 }
382
383 return 0;
384 }
385
386 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
387 {
388 struct dwc3 *dwc = dep->dwc;
389
390 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
391 dep->trb_pool, dep->trb_pool_dma);
392
393 dep->trb_pool = NULL;
394 dep->trb_pool_dma = 0;
395 }
396
397 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
398
399 /**
400 * dwc3_gadget_start_config - Configure EP resources
401 * @dwc: pointer to our controller context structure
402 * @dep: endpoint that is being enabled
403 *
404 * The assignment of transfer resources cannot perfectly follow the
405 * data book due to the fact that the controller driver does not have
406 * all knowledge of the configuration in advance. It is given this
407 * information piecemeal by the composite gadget framework after every
408 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
409 * programming model in this scenario can cause errors. For two
410 * reasons:
411 *
412 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
413 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
414 * multiple interfaces.
415 *
416 * 2) The databook does not mention doing more DEPXFERCFG for new
417 * endpoint on alt setting (8.1.6).
418 *
419 * The following simplified method is used instead:
420 *
421 * All hardware endpoints can be assigned a transfer resource and this
422 * setting will stay persistent until either a core reset or
423 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
424 * do DEPXFERCFG for every hardware endpoint as well. We are
425 * guaranteed that there are as many transfer resources as endpoints.
426 *
427 * This function is called for each endpoint when it is being enabled
428 * but is triggered only when called for EP0-out, which always happens
429 * first, and which should only happen in one of the above conditions.
430 */
431 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
432 {
433 struct dwc3_gadget_ep_cmd_params params;
434 u32 cmd;
435 int i;
436 int ret;
437
438 if (dep->number)
439 return 0;
440
441 memset(&params, 0x00, sizeof(params));
442 cmd = DWC3_DEPCMD_DEPSTARTCFG;
443
444 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
445 if (ret)
446 return ret;
447
448 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
449 struct dwc3_ep *dep = dwc->eps[i];
450
451 if (!dep)
452 continue;
453
454 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
455 if (ret)
456 return ret;
457 }
458
459 return 0;
460 }
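/*
 * In practice the above boils down to one DEPSTARTCFG command (issued
 * when this is called for EP0-out) followed by a DEPXFERCFG command with
 * NumXferRes = 1 for every allocated hardware endpoint; the per-endpoint
 * DEPCFG (SETEPCONFIG) command is issued separately from
 * __dwc3_gadget_ep_enable() via dwc3_gadget_set_ep_config().
 */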
461
462 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
463 const struct usb_endpoint_descriptor *desc,
464 const struct usb_ss_ep_comp_descriptor *comp_desc,
465 bool modify, bool restore)
466 {
467 struct dwc3_gadget_ep_cmd_params params;
468
469 if (dev_WARN_ONCE(dwc->dev, modify && restore,
470 "Can't modify and restore\n"))
471 return -EINVAL;
472
473 memset(&params, 0x00, sizeof(params));
474
475 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
476 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
477
478 /* Burst size is only needed in SuperSpeed mode */
479 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
480 u32 burst = dep->endpoint.maxburst;
481 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
482 }
483
484 if (modify) {
485 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
486 } else if (restore) {
487 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
488 params.param2 |= dep->saved_state;
489 } else {
490 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
491 }
492
493 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
494
495 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
496 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
497
498 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
499 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
500 | DWC3_DEPCFG_STREAM_EVENT_EN;
501 dep->stream_capable = true;
502 }
503
504 if (!usb_endpoint_xfer_control(desc))
505 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
506
507 /*
508 * We are doing 1:1 mapping for endpoints, meaning
509 * Physical Endpoint 2 maps to Logical Endpoint 2 and
510 * so on. We consider the direction bit as part of the physical
511 * endpoint number. So USB endpoint 0x81 is 0x03.
512 */
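/*
 * For example: bEndpointAddress 0x81 (IN, logical EP 1) becomes physical
 * endpoint (1 << 1) | 1 = 3, while 0x01 (OUT, logical EP 1) becomes
 * physical endpoint 2, matching the numbering used in
 * dwc3_gadget_init_hw_endpoints().
 */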
513 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
514
515 /*
516 * We must use the lower 16 TX FIFOs even though
517 * HW might have more
518 */
519 if (dep->direction)
520 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
521
522 if (desc->bInterval) {
523 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
524 dep->interval = 1 << (desc->bInterval - 1);
525 }
526
527 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
528 }
529
530 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
531 {
532 struct dwc3_gadget_ep_cmd_params params;
533
534 memset(&params, 0x00, sizeof(params));
535
536 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
537
538 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
539 &params);
540 }
541
542 /**
543 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
544 * @dep: endpoint to be initialized
545 * @desc: USB Endpoint Descriptor
546 *
547 * Caller should take care of locking
548 */
549 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
550 const struct usb_endpoint_descriptor *desc,
551 const struct usb_ss_ep_comp_descriptor *comp_desc,
552 bool modify, bool restore)
553 {
554 struct dwc3 *dwc = dep->dwc;
555 u32 reg;
556 int ret;
557
558 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
559
560 if (!(dep->flags & DWC3_EP_ENABLED)) {
561 ret = dwc3_gadget_start_config(dwc, dep);
562 if (ret)
563 return ret;
564 }
565
566 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
567 restore);
568 if (ret)
569 return ret;
570
571 if (!(dep->flags & DWC3_EP_ENABLED)) {
572 struct dwc3_trb *trb_st_hw;
573 struct dwc3_trb *trb_link;
574
575 dep->endpoint.desc = desc;
576 dep->comp_desc = comp_desc;
577 dep->type = usb_endpoint_type(desc);
578 dep->flags |= DWC3_EP_ENABLED;
579
580 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
581 reg |= DWC3_DALEPENA_EP(dep->number);
582 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
583
584 if (usb_endpoint_xfer_control(desc))
585 return 0;
586
587 /* Initialize the TRB ring */
588 dep->trb_dequeue = 0;
589 dep->trb_enqueue = 0;
590 memset(dep->trb_pool, 0,
591 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
592
593 /* Link TRB. The HWO bit is never reset */
594 trb_st_hw = &dep->trb_pool[0];
595
596 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
597 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
598 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
599 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
600 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
601 }
602
603 return 0;
604 }
605
606 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
607 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
608 {
609 struct dwc3_request *req;
610
611 dwc3_stop_active_transfer(dwc, dep->number, true);
612
613 /* give back all requests to the gadget driver */
614 while (!list_empty(&dep->started_list)) {
615 req = next_request(&dep->started_list);
616
617 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
618 }
619
620 while (!list_empty(&dep->pending_list)) {
621 req = next_request(&dep->pending_list);
622
623 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
624 }
625 }
626
627 /**
628 * __dwc3_gadget_ep_disable - Disables a HW endpoint
629 * @dep: the endpoint to disable
630 *
631 * This function also removes requests which are currently being processed by the
632 * hardware and those which are not yet scheduled.
633 * Caller should take care of locking.
634 */
635 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
636 {
637 struct dwc3 *dwc = dep->dwc;
638 u32 reg;
639
640 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
641
642 dwc3_remove_requests(dwc, dep);
643
644 /* make sure HW endpoint isn't stalled */
645 if (dep->flags & DWC3_EP_STALL)
646 __dwc3_gadget_ep_set_halt(dep, 0, false);
647
648 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
649 reg &= ~DWC3_DALEPENA_EP(dep->number);
650 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
651
652 dep->stream_capable = false;
653 dep->endpoint.desc = NULL;
654 dep->comp_desc = NULL;
655 dep->type = 0;
656 dep->flags = 0;
657
658 return 0;
659 }
660
661 /* -------------------------------------------------------------------------- */
662
663 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
664 const struct usb_endpoint_descriptor *desc)
665 {
666 return -EINVAL;
667 }
668
669 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
670 {
671 return -EINVAL;
672 }
673
674 /* -------------------------------------------------------------------------- */
675
676 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
677 const struct usb_endpoint_descriptor *desc)
678 {
679 struct dwc3_ep *dep;
680 struct dwc3 *dwc;
681 unsigned long flags;
682 int ret;
683
684 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
685 pr_debug("dwc3: invalid parameters\n");
686 return -EINVAL;
687 }
688
689 if (!desc->wMaxPacketSize) {
690 pr_debug("dwc3: missing wMaxPacketSize\n");
691 return -EINVAL;
692 }
693
694 dep = to_dwc3_ep(ep);
695 dwc = dep->dwc;
696
697 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
698 "%s is already enabled\n",
699 dep->name))
700 return 0;
701
702 spin_lock_irqsave(&dwc->lock, flags);
703 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
704 spin_unlock_irqrestore(&dwc->lock, flags);
705
706 return ret;
707 }
708
709 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
710 {
711 struct dwc3_ep *dep;
712 struct dwc3 *dwc;
713 unsigned long flags;
714 int ret;
715
716 if (!ep) {
717 pr_debug("dwc3: invalid parameters\n");
718 return -EINVAL;
719 }
720
721 dep = to_dwc3_ep(ep);
722 dwc = dep->dwc;
723
724 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
725 "%s is already disabled\n",
726 dep->name))
727 return 0;
728
729 spin_lock_irqsave(&dwc->lock, flags);
730 ret = __dwc3_gadget_ep_disable(dep);
731 spin_unlock_irqrestore(&dwc->lock, flags);
732
733 return ret;
734 }
735
736 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
737 gfp_t gfp_flags)
738 {
739 struct dwc3_request *req;
740 struct dwc3_ep *dep = to_dwc3_ep(ep);
741
742 req = kzalloc(sizeof(*req), gfp_flags);
743 if (!req)
744 return NULL;
745
746 req->epnum = dep->number;
747 req->dep = dep;
748
749 dep->allocated_requests++;
750
751 trace_dwc3_alloc_request(req);
752
753 return &req->request;
754 }
755
756 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
757 struct usb_request *request)
758 {
759 struct dwc3_request *req = to_dwc3_request(request);
760 struct dwc3_ep *dep = to_dwc3_ep(ep);
761
762 dep->allocated_requests--;
763 trace_dwc3_free_request(req);
764 kfree(req);
765 }
766
767 /**
768 * dwc3_prepare_one_trb - setup one TRB from one request
769 * @dep: endpoint for which this request is prepared
770 * @req: dwc3_request pointer
771 */
772 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
773 struct dwc3_request *req, dma_addr_t dma,
774 unsigned length, unsigned last, unsigned chain, unsigned node)
775 {
776 struct dwc3_trb *trb;
777
778 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
779 dep->name, req, (unsigned long long) dma,
780 length, last ? " last" : "",
781 chain ? " chain" : "");
782
783
784 trb = &dep->trb_pool[dep->trb_enqueue];
785
786 if (!req->trb) {
787 dwc3_gadget_move_started_request(req);
788 req->trb = trb;
789 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
790 req->first_trb_index = dep->trb_enqueue;
791 }
792
793 dwc3_ep_inc_enq(dep);
794
795 trb->size = DWC3_TRB_SIZE_LENGTH(length);
796 trb->bpl = lower_32_bits(dma);
797 trb->bph = upper_32_bits(dma);
798
799 switch (usb_endpoint_type(dep->endpoint.desc)) {
800 case USB_ENDPOINT_XFER_CONTROL:
801 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
802 break;
803
804 case USB_ENDPOINT_XFER_ISOC:
805 if (!node)
806 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
807 else
808 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
809
810 /* always enable Interrupt on Missed ISOC */
811 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
812 break;
813
814 case USB_ENDPOINT_XFER_BULK:
815 case USB_ENDPOINT_XFER_INT:
816 trb->ctrl = DWC3_TRBCTL_NORMAL;
817 break;
818 default:
819 /*
820 * This is only possible with faulty memory because we
821 * checked it already :)
822 */
823 BUG();
824 }
825
826 /* always enable Continue on Short Packet */
827 trb->ctrl |= DWC3_TRB_CTRL_CSP;
828
829 if (!req->request.no_interrupt && !chain)
830 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
831
832 if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
833 trb->ctrl |= DWC3_TRB_CTRL_LST;
834
835 if (chain)
836 trb->ctrl |= DWC3_TRB_CTRL_CHN;
837
838 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
839 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
840
841 trb->ctrl |= DWC3_TRB_CTRL_HWO;
842
843 dep->queued_requests++;
844
845 trace_dwc3_prepare_trb(dep, trb);
846 }
847
848 /**
849 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
850 * @dep: The endpoint with the TRB ring
851 * @index: The index of the current TRB in the ring
852 *
853 * Returns the TRB prior to the one pointed to by the index. If the
854 * index is 0, we will wrap backwards, skip the link TRB, and return
855 * the one just before that.
856 */
857 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
858 {
859 if (!index)
860 index = DWC3_TRB_NUM - 2;
861 else
862 index = dep->trb_enqueue - 1;
863
864 return &dep->trb_pool[index];
865 }
866
867 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
868 {
869 struct dwc3_trb *tmp;
870 u8 trbs_left;
871
872 /*
873 * If enqueue & dequeue are equal, then the ring is either full or empty.
874 *
875 * One way to know for sure is if the TRB right before us has HWO bit
876 * set or not. If it has, then we're definitely full and can't fit any
877 * more transfers in our ring.
878 */
879 if (dep->trb_enqueue == dep->trb_dequeue) {
880 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
881 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
882 return 0;
883
884 return DWC3_TRB_NUM - 1;
885 }
886
887 trbs_left = dep->trb_dequeue - dep->trb_enqueue - 1;
888 trbs_left &= (DWC3_TRB_NUM - 1);
889
890 return trbs_left;
891 }
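/*
 * Worked example (assuming DWC3_TRB_NUM is 256): with trb_enqueue == 10
 * and trb_dequeue == 14 there are 14 - 10 - 1 = 3 free TRBs; when both
 * indices are equal, the HWO bit of the previous TRB decides between a
 * completely full ring (0 left) and an empty one (DWC3_TRB_NUM - 1 left).
 */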
892
893 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
894 struct dwc3_request *req, unsigned int trbs_left,
895 unsigned int more_coming)
896 {
897 struct usb_request *request = &req->request;
898 struct scatterlist *sg = request->sg;
899 struct scatterlist *s;
900 unsigned int last = false;
901 unsigned int length;
902 dma_addr_t dma;
903 int i;
904
905 for_each_sg(sg, s, request->num_mapped_sgs, i) {
906 unsigned chain = true;
907
908 length = sg_dma_len(s);
909 dma = sg_dma_address(s);
910
911 if (sg_is_last(s)) {
912 if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
913 !more_coming)
914 last = true;
915
916 chain = false;
917 }
918
919 if (!trbs_left--)
920 last = true;
921
922 if (last)
923 chain = false;
924
925 dwc3_prepare_one_trb(dep, req, dma, length,
926 last, chain, i);
927
928 if (last)
929 break;
930 }
931 }
932
933 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
934 struct dwc3_request *req, unsigned int trbs_left,
935 unsigned int more_coming)
936 {
937 unsigned int last = false;
938 unsigned int length;
939 dma_addr_t dma;
940
941 dma = req->request.dma;
942 length = req->request.length;
943
944 if (!trbs_left)
945 last = true;
946
947 /* Is this the last request? */
948 if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
949 last = true;
950
951 dwc3_prepare_one_trb(dep, req, dma, length,
952 last, false, 0);
953 }
954
955 /*
956 * dwc3_prepare_trbs - setup TRBs from requests
957 * @dep: endpoint for which requests are being prepared
958 *
959 * The function goes through the requests list and sets up TRBs for the
960 * transfers. The function returns once there are no more TRBs available or
961 * it runs out of requests.
962 */
963 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
964 {
965 struct dwc3_request *req, *n;
966 unsigned int more_coming;
967 u32 trbs_left;
968
969 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
970
971 trbs_left = dwc3_calc_trbs_left(dep);
972 if (!trbs_left)
973 return;
974
975 more_coming = dep->allocated_requests - dep->queued_requests;
976
977 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
978 if (req->request.num_mapped_sgs > 0)
979 dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
980 more_coming);
981 else
982 dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
983 more_coming);
984
985 if (!trbs_left)
986 return;
987 }
988 }
989
990 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
991 {
992 struct dwc3_gadget_ep_cmd_params params;
993 struct dwc3_request *req;
994 struct dwc3 *dwc = dep->dwc;
995 int starting;
996 int ret;
997 u32 cmd;
998
999 starting = !(dep->flags & DWC3_EP_BUSY);
1000
1001 dwc3_prepare_trbs(dep);
1002 req = next_request(&dep->started_list);
1003 if (!req) {
1004 dep->flags |= DWC3_EP_PENDING_REQUEST;
1005 return 0;
1006 }
1007
1008 memset(&params, 0, sizeof(params));
1009
1010 if (starting) {
1011 params.param0 = upper_32_bits(req->trb_dma);
1012 params.param1 = lower_32_bits(req->trb_dma);
1013 cmd = DWC3_DEPCMD_STARTTRANSFER |
1014 DWC3_DEPCMD_PARAM(cmd_param);
1015 } else {
1016 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1017 DWC3_DEPCMD_PARAM(dep->resource_index);
1018 }
1019
1020 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1021 if (ret < 0) {
1022 /*
1023 * FIXME we need to iterate over the list of requests
1024 * here and stop, unmap, free and del each of the linked
1025 * requests instead of what we do now.
1026 */
1027 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1028 req->direction);
1029 list_del(&req->list);
1030 return ret;
1031 }
1032
1033 dep->flags |= DWC3_EP_BUSY;
1034
1035 if (starting) {
1036 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1037 WARN_ON_ONCE(!dep->resource_index);
1038 }
1039
1040 return 0;
1041 }
1042
1043 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1044 struct dwc3_ep *dep, u32 cur_uf)
1045 {
1046 u32 uf;
1047
1048 if (list_empty(&dep->pending_list)) {
1049 dwc3_trace(trace_dwc3_gadget,
1050 "ISOC ep %s run out for requests",
1051 dep->name);
1052 dep->flags |= DWC3_EP_PENDING_REQUEST;
1053 return;
1054 }
1055
1056 /* schedule the transfer 4 intervals into the future */
1057 uf = cur_uf + dep->interval * 4;
1058
1059 __dwc3_gadget_kick_transfer(dep, uf);
1060 }
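/*
 * Example: for a high-speed isochronous endpoint with bInterval = 3,
 * dep->interval is 1 << (3 - 1) = 4 (micro)frames, so the transfer is
 * scheduled at cur_uf + 16 (micro)frames after the XferNotReady event.
 */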
1061
1062 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1063 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1064 {
1065 u32 cur_uf, mask;
1066
1067 mask = ~(dep->interval - 1);
1068 cur_uf = event->parameters & mask;
1069
1070 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1071 }
1072
1073 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1074 {
1075 struct dwc3 *dwc = dep->dwc;
1076 int ret;
1077
1078 if (!dep->endpoint.desc) {
1079 dwc3_trace(trace_dwc3_gadget,
1080 "trying to queue request %p to disabled %s",
1081 &req->request, dep->endpoint.name);
1082 return -ESHUTDOWN;
1083 }
1084
1085 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1086 &req->request, req->dep->name)) {
1087 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
1088 &req->request, req->dep->name);
1089 return -EINVAL;
1090 }
1091
1092 pm_runtime_get(dwc->dev);
1093
1094 req->request.actual = 0;
1095 req->request.status = -EINPROGRESS;
1096 req->direction = dep->direction;
1097 req->epnum = dep->number;
1098
1099 trace_dwc3_ep_queue(req);
1100
1101 /*
1102 * We only add to our list of requests now and
1103 * start consuming the list once we get XferNotReady
1104 * IRQ.
1105 *
1106 * That way, we avoid doing anything that we don't need
1107 * to do now and defer it until the point we receive a
1108 * particular token from the Host side.
1109 *
1110 * This will also avoid Host cancelling URBs due to too
1111 * many NAKs.
1112 */
1113 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1114 dep->direction);
1115 if (ret)
1116 return ret;
1117
1118 list_add_tail(&req->list, &dep->pending_list);
1119
1120 /*
1121 * If there are no pending requests and the endpoint isn't already
1122 * busy, we will just start the request straight away.
1123 *
1124 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1125 * little bit faster.
1126 */
1127 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1128 !usb_endpoint_xfer_int(dep->endpoint.desc)) {
1129 ret = __dwc3_gadget_kick_transfer(dep, 0);
1130 goto out;
1131 }
1132
1133 /*
1134 * There are a few special cases:
1135 *
1136 * 1. XferNotReady with empty list of requests. We need to kick the
1137 * transfer here in that situation, otherwise we will be NAKing
1138 * forever. If we get XferNotReady before gadget driver has a
1139 * chance to queue a request, we will ACK the IRQ but won't be
1140 * able to receive the data until the next request is queued.
1141 * The following code is handling exactly that.
1142 *
1143 */
1144 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1145 /*
1146 * If xfernotready has already elapsed and this is an
1147 * isoc transfer, then issue END TRANSFER, so that
1148 * we can receive xfernotready again and have a
1149 * notion of the current microframe.
1150 */
1151 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1152 if (list_empty(&dep->started_list)) {
1153 dwc3_stop_active_transfer(dwc, dep->number, true);
1154 dep->flags = DWC3_EP_ENABLED;
1155 }
1156 return 0;
1157 }
1158
1159 ret = __dwc3_gadget_kick_transfer(dep, 0);
1160 if (!ret)
1161 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1162
1163 goto out;
1164 }
1165
1166 /*
1167 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1168 * kick the transfer here after queuing a request, otherwise the
1169 * core may not see the modified TRB(s).
1170 */
1171 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1172 (dep->flags & DWC3_EP_BUSY) &&
1173 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1174 WARN_ON_ONCE(!dep->resource_index);
1175 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
1176 goto out;
1177 }
1178
1179 /*
1180 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
1181 * right away, otherwise host will not know we have streams to be
1182 * handled.
1183 */
1184 if (dep->stream_capable)
1185 ret = __dwc3_gadget_kick_transfer(dep, 0);
1186
1187 out:
1188 if (ret && ret != -EBUSY)
1189 dwc3_trace(trace_dwc3_gadget,
1190 "%s: failed to kick transfers",
1191 dep->name);
1192 if (ret == -EBUSY)
1193 ret = 0;
1194
1195 return ret;
1196 }
1197
1198 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1199 struct usb_request *request)
1200 {
1201 dwc3_gadget_ep_free_request(ep, request);
1202 }
1203
1204 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1205 {
1206 struct dwc3_request *req;
1207 struct usb_request *request;
1208 struct usb_ep *ep = &dep->endpoint;
1209
1210 dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
1211 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1212 if (!request)
1213 return -ENOMEM;
1214
1215 request->length = 0;
1216 request->buf = dwc->zlp_buf;
1217 request->complete = __dwc3_gadget_ep_zlp_complete;
1218
1219 req = to_dwc3_request(request);
1220
1221 return __dwc3_gadget_ep_queue(dep, req);
1222 }
1223
1224 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1225 gfp_t gfp_flags)
1226 {
1227 struct dwc3_request *req = to_dwc3_request(request);
1228 struct dwc3_ep *dep = to_dwc3_ep(ep);
1229 struct dwc3 *dwc = dep->dwc;
1230
1231 unsigned long flags;
1232
1233 int ret;
1234
1235 spin_lock_irqsave(&dwc->lock, flags);
1236 ret = __dwc3_gadget_ep_queue(dep, req);
1237
1238 /*
1239 * Okay, here's the thing: if the gadget driver has requested a ZLP by
1240 * setting request->zero, instead of doing magic, we will just queue an
1241 * extra usb_request ourselves so that it gets handled the same way as
1242 * any other request.
1243 */
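/*
 * For example: a 1024-byte bulk IN request on an endpoint with a 512-byte
 * wMaxPacketSize and request->zero set results in one extra zero-length
 * request being queued, so the host sees a short (zero-length) packet
 * terminating the transfer.
 */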
1244 if (ret == 0 && request->zero && request->length &&
1245 (request->length % ep->maxpacket == 0))
1246 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1247
1248 spin_unlock_irqrestore(&dwc->lock, flags);
1249
1250 return ret;
1251 }
1252
1253 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1254 struct usb_request *request)
1255 {
1256 struct dwc3_request *req = to_dwc3_request(request);
1257 struct dwc3_request *r = NULL;
1258
1259 struct dwc3_ep *dep = to_dwc3_ep(ep);
1260 struct dwc3 *dwc = dep->dwc;
1261
1262 unsigned long flags;
1263 int ret = 0;
1264
1265 trace_dwc3_ep_dequeue(req);
1266
1267 spin_lock_irqsave(&dwc->lock, flags);
1268
1269 list_for_each_entry(r, &dep->pending_list, list) {
1270 if (r == req)
1271 break;
1272 }
1273
1274 if (r != req) {
1275 list_for_each_entry(r, &dep->started_list, list) {
1276 if (r == req)
1277 break;
1278 }
1279 if (r == req) {
1280 /* wait until it is processed */
1281 dwc3_stop_active_transfer(dwc, dep->number, true);
1282 goto out1;
1283 }
1284 dev_err(dwc->dev, "request %p was not queued to %s\n",
1285 request, ep->name);
1286 ret = -EINVAL;
1287 goto out0;
1288 }
1289
1290 out1:
1291 /* giveback the request */
1292 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1293
1294 out0:
1295 spin_unlock_irqrestore(&dwc->lock, flags);
1296
1297 return ret;
1298 }
1299
1300 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1301 {
1302 struct dwc3_gadget_ep_cmd_params params;
1303 struct dwc3 *dwc = dep->dwc;
1304 int ret;
1305
1306 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1307 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1308 return -EINVAL;
1309 }
1310
1311 memset(&params, 0x00, sizeof(params));
1312
1313 if (value) {
1314 struct dwc3_trb *trb;
1315
1316 unsigned transfer_in_flight;
1317 unsigned started;
1318
1319 if (dep->number > 1)
1320 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1321 else
1322 trb = &dwc->ep0_trb[dep->trb_enqueue];
1323
1324 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1325 started = !list_empty(&dep->started_list);
1326
1327 if (!protocol && ((dep->direction && transfer_in_flight) ||
1328 (!dep->direction && started))) {
1329 dwc3_trace(trace_dwc3_gadget,
1330 "%s: pending request, cannot halt",
1331 dep->name);
1332 return -EAGAIN;
1333 }
1334
1335 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1336 &params);
1337 if (ret)
1338 dev_err(dwc->dev, "failed to set STALL on %s\n",
1339 dep->name);
1340 else
1341 dep->flags |= DWC3_EP_STALL;
1342 } else {
1343
1344 ret = dwc3_send_clear_stall_ep_cmd(dep);
1345 if (ret)
1346 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1347 dep->name);
1348 else
1349 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1350 }
1351
1352 return ret;
1353 }
1354
1355 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1356 {
1357 struct dwc3_ep *dep = to_dwc3_ep(ep);
1358 struct dwc3 *dwc = dep->dwc;
1359
1360 unsigned long flags;
1361
1362 int ret;
1363
1364 spin_lock_irqsave(&dwc->lock, flags);
1365 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1366 spin_unlock_irqrestore(&dwc->lock, flags);
1367
1368 return ret;
1369 }
1370
1371 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1372 {
1373 struct dwc3_ep *dep = to_dwc3_ep(ep);
1374 struct dwc3 *dwc = dep->dwc;
1375 unsigned long flags;
1376 int ret;
1377
1378 spin_lock_irqsave(&dwc->lock, flags);
1379 dep->flags |= DWC3_EP_WEDGE;
1380
1381 if (dep->number == 0 || dep->number == 1)
1382 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1383 else
1384 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1385 spin_unlock_irqrestore(&dwc->lock, flags);
1386
1387 return ret;
1388 }
1389
1390 /* -------------------------------------------------------------------------- */
1391
1392 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1393 .bLength = USB_DT_ENDPOINT_SIZE,
1394 .bDescriptorType = USB_DT_ENDPOINT,
1395 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1396 };
1397
1398 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1399 .enable = dwc3_gadget_ep0_enable,
1400 .disable = dwc3_gadget_ep0_disable,
1401 .alloc_request = dwc3_gadget_ep_alloc_request,
1402 .free_request = dwc3_gadget_ep_free_request,
1403 .queue = dwc3_gadget_ep0_queue,
1404 .dequeue = dwc3_gadget_ep_dequeue,
1405 .set_halt = dwc3_gadget_ep0_set_halt,
1406 .set_wedge = dwc3_gadget_ep_set_wedge,
1407 };
1408
1409 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1410 .enable = dwc3_gadget_ep_enable,
1411 .disable = dwc3_gadget_ep_disable,
1412 .alloc_request = dwc3_gadget_ep_alloc_request,
1413 .free_request = dwc3_gadget_ep_free_request,
1414 .queue = dwc3_gadget_ep_queue,
1415 .dequeue = dwc3_gadget_ep_dequeue,
1416 .set_halt = dwc3_gadget_ep_set_halt,
1417 .set_wedge = dwc3_gadget_ep_set_wedge,
1418 };
1419
1420 /* -------------------------------------------------------------------------- */
1421
1422 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1423 {
1424 struct dwc3 *dwc = gadget_to_dwc(g);
1425 u32 reg;
1426
1427 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1428 return DWC3_DSTS_SOFFN(reg);
1429 }
1430
1431 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1432 {
1433 int retries;
1434
1435 int ret;
1436 u32 reg;
1437
1438 u8 link_state;
1439 u8 speed;
1440
1441 /*
1442 * According to the Databook, a remote wakeup request should
1443 * be issued only when the device is in the early suspend state.
1444 *
1445 * We can check that via USB Link State bits in DSTS register.
1446 */
1447 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1448
1449 speed = reg & DWC3_DSTS_CONNECTSPD;
1450 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1451 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1452 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
1453 return 0;
1454 }
1455
1456 link_state = DWC3_DSTS_USBLNKST(reg);
1457
1458 switch (link_state) {
1459 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1460 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1461 break;
1462 default:
1463 dwc3_trace(trace_dwc3_gadget,
1464 "can't wakeup from '%s'",
1465 dwc3_gadget_link_string(link_state));
1466 return -EINVAL;
1467 }
1468
1469 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1470 if (ret < 0) {
1471 dev_err(dwc->dev, "failed to put link in Recovery\n");
1472 return ret;
1473 }
1474
1475 /* Recent versions do this automatically */
1476 if (dwc->revision < DWC3_REVISION_194A) {
1477 /* write zeroes to Link Change Request */
1478 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1479 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1480 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1481 }
1482
1483 /* poll until Link State changes to ON */
1484 retries = 20000;
1485
1486 while (retries--) {
1487 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1488
1489 /* in HS, means ON */
1490 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1491 break;
1492 }
1493
1494 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1495 dev_err(dwc->dev, "failed to send remote wakeup\n");
1496 return -EINVAL;
1497 }
1498
1499 return 0;
1500 }
1501
1502 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1503 {
1504 struct dwc3 *dwc = gadget_to_dwc(g);
1505 unsigned long flags;
1506 int ret;
1507
1508 spin_lock_irqsave(&dwc->lock, flags);
1509 ret = __dwc3_gadget_wakeup(dwc);
1510 spin_unlock_irqrestore(&dwc->lock, flags);
1511
1512 return ret;
1513 }
1514
1515 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1516 int is_selfpowered)
1517 {
1518 struct dwc3 *dwc = gadget_to_dwc(g);
1519 unsigned long flags;
1520
1521 spin_lock_irqsave(&dwc->lock, flags);
1522 g->is_selfpowered = !!is_selfpowered;
1523 spin_unlock_irqrestore(&dwc->lock, flags);
1524
1525 return 0;
1526 }
1527
1528 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1529 {
1530 u32 reg;
1531 u32 timeout = 500;
1532
1533 if (pm_runtime_suspended(dwc->dev))
1534 return 0;
1535
1536 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1537 if (is_on) {
1538 if (dwc->revision <= DWC3_REVISION_187A) {
1539 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1540 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1541 }
1542
1543 if (dwc->revision >= DWC3_REVISION_194A)
1544 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1545 reg |= DWC3_DCTL_RUN_STOP;
1546
1547 if (dwc->has_hibernation)
1548 reg |= DWC3_DCTL_KEEP_CONNECT;
1549
1550 dwc->pullups_connected = true;
1551 } else {
1552 reg &= ~DWC3_DCTL_RUN_STOP;
1553
1554 if (dwc->has_hibernation && !suspend)
1555 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1556
1557 dwc->pullups_connected = false;
1558 }
1559
1560 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1561
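/*
 * Wait for DSTS.DEVCTRLHLT to reflect the requested state: the bit is
 * cleared once the controller starts running and set once it halts.
 */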
1562 do {
1563 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1564 reg &= DWC3_DSTS_DEVCTRLHLT;
1565 } while (--timeout && !(!is_on ^ !reg));
1566
1567 if (!timeout)
1568 return -ETIMEDOUT;
1569
1570 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1571 dwc->gadget_driver
1572 ? dwc->gadget_driver->function : "no-function",
1573 is_on ? "connect" : "disconnect");
1574
1575 return 0;
1576 }
1577
1578 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1579 {
1580 struct dwc3 *dwc = gadget_to_dwc(g);
1581 unsigned long flags;
1582 int ret;
1583
1584 is_on = !!is_on;
1585
1586 spin_lock_irqsave(&dwc->lock, flags);
1587 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1588 spin_unlock_irqrestore(&dwc->lock, flags);
1589
1590 return ret;
1591 }
1592
1593 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1594 {
1595 u32 reg;
1596
1597 /* Enable all but Start and End of Frame IRQs */
1598 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1599 DWC3_DEVTEN_EVNTOVERFLOWEN |
1600 DWC3_DEVTEN_CMDCMPLTEN |
1601 DWC3_DEVTEN_ERRTICERREN |
1602 DWC3_DEVTEN_WKUPEVTEN |
1603 DWC3_DEVTEN_ULSTCNGEN |
1604 DWC3_DEVTEN_CONNECTDONEEN |
1605 DWC3_DEVTEN_USBRSTEN |
1606 DWC3_DEVTEN_DISCONNEVTEN);
1607
1608 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1609 }
1610
1611 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1612 {
1613 /* mask all interrupts */
1614 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1615 }
1616
1617 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1618 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1619
1620 /**
1621 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1622 * @dwc: pointer to our context structure
1623 *
1624 * The following looks complex but it's actually very simple. In order to
1625 * calculate the number of packets we can burst at once on OUT transfers, we're
1626 * gonna use RxFIFO size.
1627 *
1628 * To calculate RxFIFO size we need two numbers:
1629 * MDWIDTH = size, in bits, of the internal memory bus
1630 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1631 *
1632 * Given these two numbers, the formula is simple:
1633 *
1634 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1635 *
1636 * 24 bytes is for 3x SETUP packets
1637 * 16 bytes is a clock domain crossing tolerance
1638 *
1639 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
1640 */
1641 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1642 {
1643 u32 ram2_depth;
1644 u32 mdwidth;
1645 u32 nump;
1646 u32 reg;
1647
1648 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1649 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1650
1651 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1652 nump = min_t(u32, nump, 16);
1653
1654 /* update NumP */
1655 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1656 reg &= ~DWC3_DCFG_NUMP_MASK;
1657 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1658 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1659 }
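/*
 * Worked example with assumed (illustrative) hardware parameters:
 * MDWIDTH = 64 bits and RAM2_DEPTH = 2048 give
 * RxFIFO Size = (2048 * 64 / 8) - 24 - 16 = 16344 bytes, hence
 * NUMP = 16344 / 1024 = 15, which is below the cap of 16 applied above.
 */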
1660
1661 static int __dwc3_gadget_start(struct dwc3 *dwc)
1662 {
1663 struct dwc3_ep *dep;
1664 int ret = 0;
1665 u32 reg;
1666
1667 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1668 reg &= ~(DWC3_DCFG_SPEED_MASK);
1669
1670 /**
1671 * WORKAROUND: DWC3 revision < 2.20a have an issue
1672 * which would cause metastability state on Run/Stop
1673 * bit if we try to force the IP to USB2-only mode.
1674 *
1675 * Because of that, we cannot configure the IP to any
1676 * speed other than SuperSpeed.
1677 *
1678 * Refers to:
1679 *
1680 * STAR#9000525659: Clock Domain Crossing on DCTL in
1681 * USB 2.0 Mode
1682 */
1683 if (dwc->revision < DWC3_REVISION_220A) {
1684 reg |= DWC3_DCFG_SUPERSPEED;
1685 } else {
1686 switch (dwc->maximum_speed) {
1687 case USB_SPEED_LOW:
1688 reg |= DWC3_DCFG_LOWSPEED;
1689 break;
1690 case USB_SPEED_FULL:
1691 reg |= DWC3_DCFG_FULLSPEED1;
1692 break;
1693 case USB_SPEED_HIGH:
1694 reg |= DWC3_DCFG_HIGHSPEED;
1695 break;
1696 case USB_SPEED_SUPER_PLUS:
1697 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
1698 break;
1699 default:
1700 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1701 dwc->maximum_speed);
1702 /* fall through */
1703 case USB_SPEED_SUPER:
1704 reg |= DWC3_DCFG_SUPERSPEED;
1705 break;
1706 }
1707 }
1708 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1709
1710 /*
1711 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1712 * field instead of letting dwc3 itself calculate that automatically.
1713 *
1714 * This way, we maximize the chances that we'll be able to get several
1715 * bursts of data without going through any sort of endpoint throttling.
1716 */
1717 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1718 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1719 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1720
1721 dwc3_gadget_setup_nump(dwc);
1722
1723 /* Start with SuperSpeed Default */
1724 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1725
1726 dep = dwc->eps[0];
1727 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1728 false);
1729 if (ret) {
1730 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1731 goto err0;
1732 }
1733
1734 dep = dwc->eps[1];
1735 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1736 false);
1737 if (ret) {
1738 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1739 goto err1;
1740 }
1741
1742 /* begin to receive SETUP packets */
1743 dwc->ep0state = EP0_SETUP_PHASE;
1744 dwc3_ep0_out_start(dwc);
1745
1746 dwc3_gadget_enable_irq(dwc);
1747
1748 return 0;
1749
1750 err1:
1751 __dwc3_gadget_ep_disable(dwc->eps[0]);
1752
1753 err0:
1754 return ret;
1755 }
1756
1757 static int dwc3_gadget_start(struct usb_gadget *g,
1758 struct usb_gadget_driver *driver)
1759 {
1760 struct dwc3 *dwc = gadget_to_dwc(g);
1761 unsigned long flags;
1762 int ret = 0;
1763 int irq;
1764
1765 irq = dwc->irq_gadget;
1766 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1767 IRQF_SHARED, "dwc3", dwc->ev_buf);
1768 if (ret) {
1769 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1770 irq, ret);
1771 goto err0;
1772 }
1773
1774 spin_lock_irqsave(&dwc->lock, flags);
1775 if (dwc->gadget_driver) {
1776 dev_err(dwc->dev, "%s is already bound to %s\n",
1777 dwc->gadget.name,
1778 dwc->gadget_driver->driver.name);
1779 ret = -EBUSY;
1780 goto err1;
1781 }
1782
1783 dwc->gadget_driver = driver;
1784
1785 if (pm_runtime_active(dwc->dev))
1786 __dwc3_gadget_start(dwc);
1787
1788 spin_unlock_irqrestore(&dwc->lock, flags);
1789
1790 return 0;
1791
1792 err1:
1793 spin_unlock_irqrestore(&dwc->lock, flags);
1794 free_irq(irq, dwc);
1795
1796 err0:
1797 return ret;
1798 }
1799
1800 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1801 {
1802 if (pm_runtime_suspended(dwc->dev))
1803 return;
1804
1805 dwc3_gadget_disable_irq(dwc);
1806 __dwc3_gadget_ep_disable(dwc->eps[0]);
1807 __dwc3_gadget_ep_disable(dwc->eps[1]);
1808 }
1809
1810 static int dwc3_gadget_stop(struct usb_gadget *g)
1811 {
1812 struct dwc3 *dwc = gadget_to_dwc(g);
1813 unsigned long flags;
1814
1815 spin_lock_irqsave(&dwc->lock, flags);
1816 __dwc3_gadget_stop(dwc);
1817 dwc->gadget_driver = NULL;
1818 spin_unlock_irqrestore(&dwc->lock, flags);
1819
1820 free_irq(dwc->irq_gadget, dwc->ev_buf);
1821
1822 return 0;
1823 }
1824
1825 static const struct usb_gadget_ops dwc3_gadget_ops = {
1826 .get_frame = dwc3_gadget_get_frame,
1827 .wakeup = dwc3_gadget_wakeup,
1828 .set_selfpowered = dwc3_gadget_set_selfpowered,
1829 .pullup = dwc3_gadget_pullup,
1830 .udc_start = dwc3_gadget_start,
1831 .udc_stop = dwc3_gadget_stop,
1832 };
1833
1834 /* -------------------------------------------------------------------------- */
1835
1836 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1837 u8 num, u32 direction)
1838 {
1839 struct dwc3_ep *dep;
1840 u8 i;
1841
1842 for (i = 0; i < num; i++) {
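/* Physical endpoint numbers interleave directions: ep0out=0, ep0in=1, ep1out=2, ep1in=3, ... */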
1843 u8 epnum = (i << 1) | (direction ? 1 : 0);
1844
1845 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1846 if (!dep)
1847 return -ENOMEM;
1848
1849 dep->dwc = dwc;
1850 dep->number = epnum;
1851 dep->direction = !!direction;
1852 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
1853 dwc->eps[epnum] = dep;
1854
1855 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1856 (epnum & 1) ? "in" : "out");
1857
1858 dep->endpoint.name = dep->name;
1859 spin_lock_init(&dep->lock);
1860
1861 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1862
1863 if (epnum == 0 || epnum == 1) {
1864 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1865 dep->endpoint.maxburst = 1;
1866 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1867 if (!epnum)
1868 dwc->gadget.ep0 = &dep->endpoint;
1869 } else {
1870 int ret;
1871
1872 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1873 dep->endpoint.max_streams = 15;
1874 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1875 list_add_tail(&dep->endpoint.ep_list,
1876 &dwc->gadget.ep_list);
1877
1878 ret = dwc3_alloc_trb_pool(dep);
1879 if (ret)
1880 return ret;
1881 }
1882
1883 if (epnum == 0 || epnum == 1) {
1884 dep->endpoint.caps.type_control = true;
1885 } else {
1886 dep->endpoint.caps.type_iso = true;
1887 dep->endpoint.caps.type_bulk = true;
1888 dep->endpoint.caps.type_int = true;
1889 }
1890
1891 dep->endpoint.caps.dir_in = !!direction;
1892 dep->endpoint.caps.dir_out = !direction;
1893
1894 INIT_LIST_HEAD(&dep->pending_list);
1895 INIT_LIST_HEAD(&dep->started_list);
1896 }
1897
1898 return 0;
1899 }
1900
1901 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1902 {
1903 int ret;
1904
1905 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1906
1907 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1908 if (ret < 0) {
1909 dwc3_trace(trace_dwc3_gadget,
1910 "failed to allocate OUT endpoints");
1911 return ret;
1912 }
1913
1914 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1915 if (ret < 0) {
1916 dwc3_trace(trace_dwc3_gadget,
1917 "failed to allocate IN endpoints");
1918 return ret;
1919 }
1920
1921 return 0;
1922 }
1923
1924 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1925 {
1926 struct dwc3_ep *dep;
1927 u8 epnum;
1928
1929 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1930 dep = dwc->eps[epnum];
1931 if (!dep)
1932 continue;
1933 /*
1934 * Physical endpoints 0 and 1 are special; they form the
1935 * bi-directional USB endpoint 0.
1936 *
1937 * For those two physical endpoints, we don't allocate a TRB
1938 * pool nor do we add them to the endpoints list. Due to that, we
1939 * shouldn't do these two operations otherwise we would end up
1940 * with all sorts of bugs when removing dwc3.ko.
1941 */
1942 if (epnum != 0 && epnum != 1) {
1943 dwc3_free_trb_pool(dep);
1944 list_del(&dep->endpoint.ep_list);
1945 }
1946
1947 kfree(dep);
1948 }
1949 }
1950
1951 /* -------------------------------------------------------------------------- */
1952
1953 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1954 struct dwc3_request *req, struct dwc3_trb *trb,
1955 const struct dwc3_event_depevt *event, int status,
1956 int chain)
1957 {
1958 unsigned int count;
1959 unsigned int s_pkt = 0;
1960 unsigned int trb_status;
1961
1962 dep->queued_requests--;
1963 trace_dwc3_complete_trb(dep, trb);
1964
1965 /*
1966 * If we're in the middle of a series of chained TRBs and we
1967 * receive a short transfer along the way, DWC3 will skip
1968 * through all TRBs including the last TRB in the chain (the
1969 * one where the CHN bit is zero). DWC3 will also avoid clearing
1970 * the HWO bit, and SW has to do it manually.
1971 *
1972 * We're going to do that here to avoid problems of HW trying
1973 * to use bogus TRBs for transfers.
1974 */
1975 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
1976 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1977
1978 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1979 return 1;
1980
1981 count = trb->size & DWC3_TRB_SIZE_MASK;
1982
1983 if (dep->direction) {
1984 if (count) {
1985 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1986 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1987 dwc3_trace(trace_dwc3_gadget,
1988 "%s: incomplete IN transfer",
1989 dep->name);
1990 /*
1991 * If a missed isoc occurred and there is
1992 * no request queued, then issue END
1993 * TRANSFER so that the core generates
1994 * the next xfernotready and we will issue
1995 * a fresh START TRANSFER.
1996 * If there are still queued requests
1997 * then wait; do not issue either END
1998 * or UPDATE TRANSFER, just attach the next
1999 * request in pending_list during
2000 * giveback. If any future queued request
2001 * is successfully transferred then we
2002 * will issue UPDATE TRANSFER for all
2003 * requests in the pending_list.
2004 */
2005 dep->flags |= DWC3_EP_MISSED_ISOC;
2006 } else {
2007 dev_err(dwc->dev, "incomplete IN transfer %s\n",
2008 dep->name);
2009 status = -ECONNRESET;
2010 }
2011 } else {
2012 dep->flags &= ~DWC3_EP_MISSED_ISOC;
2013 }
2014 } else {
2015 if (count && (event->status & DEPEVT_STATUS_SHORT))
2016 s_pkt = 1;
2017 }
2018
2019 if (s_pkt && !chain)
2020 return 1;
2021 if ((event->status & DEPEVT_STATUS_LST) &&
2022 (trb->ctrl & (DWC3_TRB_CTRL_LST |
2023 DWC3_TRB_CTRL_HWO)))
2024 return 1;
2025 if ((event->status & DEPEVT_STATUS_IOC) &&
2026 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2027 return 1;
2028 return 0;
2029 }
2030
2031 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2032 const struct dwc3_event_depevt *event, int status)
2033 {
2034 struct dwc3_request *req;
2035 struct dwc3_trb *trb;
2036 unsigned int slot;
2037 unsigned int i;
2038 int count = 0;
2039 int ret;
2040
2041 do {
2042 int chain;
2043
2044 req = next_request(&dep->started_list);
2045 if (WARN_ON_ONCE(!req))
2046 return 1;
2047
2048 chain = req->request.num_mapped_sgs > 0;
2049 i = 0;
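/*
 * Walk this request's TRBs (one per mapped sg entry, or a single TRB
 * for a linear request), skipping over the link TRB slot in the ring.
 */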
2050 do {
2051 slot = req->first_trb_index + i;
2052 if (slot == DWC3_TRB_NUM - 1)
2053 slot++;
2054 slot %= DWC3_TRB_NUM;
2055 trb = &dep->trb_pool[slot];
2056 count += trb->size & DWC3_TRB_SIZE_MASK;
2057
2058 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2059 event, status, chain);
2060 if (ret)
2061 break;
2062 } while (++i < req->request.num_mapped_sgs);
2063
2064 /*
2065 * We assume here that we will always receive the entire data
2066 * block that we expect. Meaning, if we program RX to
2067 * receive 4K but we receive only 2K, we assume that's all we
2068 * should receive and we simply bounce the request back to the
2069 * gadget driver for further processing.
2070 */
2071 req->request.actual += req->request.length - count;
2072 dwc3_gadget_giveback(dep, req, status);
2073
2074 if (ret)
2075 break;
2076 } while (1);
2077
2078 /*
2079 * Our endpoint might get disabled by another thread during
2080 * dwc3_gadget_giveback(). If that happens, we just return 1
2081 * early so the DWC3_EP_BUSY flag gets cleared.
2082 */
2083 if (!dep->endpoint.desc)
2084 return 1;
2085
2086 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2087 list_empty(&dep->started_list)) {
2088 if (list_empty(&dep->pending_list)) {
2089 /*
2090 * If there is no entry in the request list, then do
2091 * not issue END TRANSFER now. Just set the PENDING
2092 * flag, so that END TRANSFER is issued when an
2093 * entry is added into the request list.
2094 */
2095 dep->flags = DWC3_EP_PENDING_REQUEST;
2096 } else {
2097 dwc3_stop_active_transfer(dwc, dep->number, true);
2098 dep->flags = DWC3_EP_ENABLED;
2099 }
2100 return 1;
2101 }
2102
2103 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2104 if ((event->status & DEPEVT_STATUS_IOC) &&
2105 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2106 return 0;
2107 return 1;
2108 }
2109
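/*
 * Handle XferComplete/XferInProgress for a non-control endpoint: clean up
 * completed requests, re-enable U1/U2 on revisions < 1.83a once all enabled
 * endpoints are idle (2nd half of that workaround), and kick any pending
 * transfers on non-isochronous endpoints.
 */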
2110 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2111 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2112 {
2113 unsigned status = 0;
2114 int clean_busy;
2115 u32 is_xfer_complete;
2116
2117 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2118
2119 if (event->status & DEPEVT_STATUS_BUSERR)
2120 status = -ECONNRESET;
2121
2122 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2123 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2124 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2125 dep->flags &= ~DWC3_EP_BUSY;
2126
2127 /*
2128 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2129 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2130 */
2131 if (dwc->revision < DWC3_REVISION_183A) {
2132 u32 reg;
2133 int i;
2134
2135 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2136 dep = dwc->eps[i];
2137
2138 if (!(dep->flags & DWC3_EP_ENABLED))
2139 continue;
2140
2141 if (!list_empty(&dep->started_list))
2142 return;
2143 }
2144
2145 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2146 reg |= dwc->u1u2;
2147 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2148
2149 dwc->u1u2 = 0;
2150 }
2151
2152 /*
2153 * Our endpoint might get disabled by another thread during
2154 * dwc3_gadget_giveback(). If that happens, we just return
2155 * early to avoid kicking transfers on a disabled endpoint.
2156 */
2157 if (!dep->endpoint.desc)
2158 return;
2159
2160 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2161 int ret;
2162
2163 ret = __dwc3_gadget_kick_transfer(dep, 0);
2164 if (!ret || ret == -EBUSY)
2165 return;
2166 }
2167 }
2168
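/*
 * Dispatch an endpoint event: EP0/EP1 events go to the ep0 handler, all
 * other enabled endpoints are handled here based on the event type.
 */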
2169 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2170 const struct dwc3_event_depevt *event)
2171 {
2172 struct dwc3_ep *dep;
2173 u8 epnum = event->endpoint_number;
2174
2175 dep = dwc->eps[epnum];
2176
2177 if (!(dep->flags & DWC3_EP_ENABLED))
2178 return;
2179
2180 if (epnum == 0 || epnum == 1) {
2181 dwc3_ep0_interrupt(dwc, event);
2182 return;
2183 }
2184
2185 switch (event->endpoint_event) {
2186 case DWC3_DEPEVT_XFERCOMPLETE:
2187 dep->resource_index = 0;
2188
2189 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2190 dwc3_trace(trace_dwc3_gadget,
2191 "%s is an Isochronous endpoint",
2192 dep->name);
2193 return;
2194 }
2195
2196 dwc3_endpoint_transfer_complete(dwc, dep, event);
2197 break;
2198 case DWC3_DEPEVT_XFERINPROGRESS:
2199 dwc3_endpoint_transfer_complete(dwc, dep, event);
2200 break;
2201 case DWC3_DEPEVT_XFERNOTREADY:
2202 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2203 dwc3_gadget_start_isoc(dwc, dep, event);
2204 } else {
2205 int active;
2206 int ret;
2207
2208 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2209
2210 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2211 dep->name, active ? "Transfer Active"
2212 : "Transfer Not Active");
2213
2214 ret = __dwc3_gadget_kick_transfer(dep, 0);
2215 if (!ret || ret == -EBUSY)
2216 return;
2217
2218 dwc3_trace(trace_dwc3_gadget,
2219 "%s: failed to kick transfers",
2220 dep->name);
2221 }
2222
2223 break;
2224 case DWC3_DEPEVT_STREAMEVT:
2225 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2226 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2227 dep->name);
2228 return;
2229 }
2230
2231 switch (event->status) {
2232 case DEPEVT_STREAMEVT_FOUND:
2233 dwc3_trace(trace_dwc3_gadget,
2234 "Stream %d found and started",
2235 event->parameters);
2236
2237 break;
2238 case DEPEVT_STREAMEVT_NOTFOUND:
2239 /* FALLTHROUGH */
2240 default:
2241 dwc3_trace(trace_dwc3_gadget,
2242 "unable to find suitable stream");
2243 }
2244 break;
2245 case DWC3_DEPEVT_RXTXFIFOEVT:
2246 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
2247 break;
2248 case DWC3_DEPEVT_EPCMDCMPLT:
2249 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2250 break;
2251 }
2252 }
2253
2254 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2255 {
2256 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2257 spin_unlock(&dwc->lock);
2258 dwc->gadget_driver->disconnect(&dwc->gadget);
2259 spin_lock(&dwc->lock);
2260 }
2261 }
2262
2263 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2264 {
2265 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2266 spin_unlock(&dwc->lock);
2267 dwc->gadget_driver->suspend(&dwc->gadget);
2268 spin_lock(&dwc->lock);
2269 }
2270 }
2271
2272 static void dwc3_resume_gadget(struct dwc3 *dwc)
2273 {
2274 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2275 spin_unlock(&dwc->lock);
2276 dwc->gadget_driver->resume(&dwc->gadget);
2277 spin_lock(&dwc->lock);
2278 }
2279 }
2280
2281 static void dwc3_reset_gadget(struct dwc3 *dwc)
2282 {
2283 if (!dwc->gadget_driver)
2284 return;
2285
2286 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2287 spin_unlock(&dwc->lock);
2288 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2289 spin_lock(&dwc->lock);
2290 }
2291 }
2292
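/*
 * Issue the End Transfer command for @epnum and clear its resource index;
 * see the NOTICE below about intentionally not waiting for the command
 * completion interrupt.
 */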
2293 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2294 {
2295 struct dwc3_ep *dep;
2296 struct dwc3_gadget_ep_cmd_params params;
2297 u32 cmd;
2298 int ret;
2299
2300 dep = dwc->eps[epnum];
2301
2302 if (!dep->resource_index)
2303 return;
2304
2305 /*
2306 * NOTICE: We are violating what the Databook says about the
2307 * EndTransfer command. Ideally we would _always_ wait for the
2308 * EndTransfer Command Completion IRQ, but that's causing too
2309 * much trouble synchronizing between us and gadget driver.
2310 *
2311 * We have discussed this with the IP Provider and it was
2312 * suggested to giveback all requests here, but give HW some
2313 * extra time to synchronize with the interconnect. We're using
2314 * an arbitrary 100us delay for that.
2315 *
2316 * Note also that a similar handling was tested by Synopsys
2317 * (thanks a lot Paul) and nothing bad has come out of it.
2318 * In short, what we're doing is:
2319 *
2320 * - Issue EndTransfer WITH CMDIOC bit set
2321 * - Wait 100us
2322 */
2323
2324 cmd = DWC3_DEPCMD_ENDTRANSFER;
2325 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2326 cmd |= DWC3_DEPCMD_CMDIOC;
2327 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2328 memset(&params, 0, sizeof(params));
2329 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2330 WARN_ON_ONCE(ret);
2331 dep->resource_index = 0;
2332 dep->flags &= ~DWC3_EP_BUSY;
2333 udelay(100);
2334 }
2335
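/*
 * Give back all queued requests on every enabled non-control endpoint;
 * called from the USB reset handler.
 */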
2336 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2337 {
2338 u32 epnum;
2339
2340 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2341 struct dwc3_ep *dep;
2342
2343 dep = dwc->eps[epnum];
2344 if (!dep)
2345 continue;
2346
2347 if (!(dep->flags & DWC3_EP_ENABLED))
2348 continue;
2349
2350 dwc3_remove_requests(dwc, dep);
2351 }
2352 }
2353
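/*
 * Clear the STALL condition on every endpoint that currently has
 * DWC3_EP_STALL set; called from the USB reset handler.
 */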
2354 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2355 {
2356 u32 epnum;
2357
2358 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2359 struct dwc3_ep *dep;
2360 int ret;
2361
2362 dep = dwc->eps[epnum];
2363 if (!dep)
2364 continue;
2365
2366 if (!(dep->flags & DWC3_EP_STALL))
2367 continue;
2368
2369 dep->flags &= ~DWC3_EP_STALL;
2370
2371 ret = dwc3_send_clear_stall_ep_cmd(dep);
2372 WARN_ON_ONCE(ret);
2373 }
2374 }
2375
2376 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2377 {
2378 u32 reg;
2379
2380 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2381 reg &= ~DWC3_DCTL_INITU1ENA;
2382 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2383
2384 reg &= ~DWC3_DCTL_INITU2ENA;
2385 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2386
2387 dwc3_disconnect_gadget(dwc);
2388
2389 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2390 dwc->setup_packet_pending = false;
2391 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2392
2393 dwc->connected = false;
2394 }
2395
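/*
 * USB reset handler: emulate a missing Disconnect event on old revisions,
 * notify the gadget driver, stop all active transfers, clear endpoint
 * stalls, leave test mode and reset the device address to zero.
 */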
2396 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2397 {
2398 u32 reg;
2399
2400 dwc->connected = true;
2401
2402 /*
2403 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2404 * would cause a missing Disconnect Event if there's a
2405 * pending Setup Packet in the FIFO.
2406 *
2407 * There's no suggested workaround on the official Bug
2408 * report, which states that "unless the driver/application
2409 * is doing any special handling of a disconnect event,
2410 * there is no functional issue".
2411 *
2412 * Unfortunately, it turns out that we _do_ some special
2413 * handling of a disconnect event, namely complete all
2414 * pending transfers, notify gadget driver of the
2415 * disconnection, and so on.
2416 *
2417 * Our suggested workaround is to follow the Disconnect
2418 * Event steps here, instead, based on a setup_packet_pending
2419 * flag. Such flag gets set whenever we have a SETUP_PENDING
2420 * status for EP0 TRBs and gets cleared on XferComplete for the
2421 * same endpoint.
2422 *
2423 * Refers to:
2424 *
2425 * STAR#9000466709: RTL: Device : Disconnect event not
2426 * generated if setup packet pending in FIFO
2427 */
2428 if (dwc->revision < DWC3_REVISION_188A) {
2429 if (dwc->setup_packet_pending)
2430 dwc3_gadget_disconnect_interrupt(dwc);
2431 }
2432
2433 dwc3_reset_gadget(dwc);
2434
2435 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2436 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2437 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2438 dwc->test_mode = false;
2439
2440 dwc3_stop_active_transfers(dwc);
2441 dwc3_clear_stall_all_ep(dwc);
2442
2443 /* Reset device address to zero */
2444 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2445 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2446 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2447 }
2448
2449 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2450 {
2451 u32 reg;
2452 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2453
2454 /*
2455 * We change the clock only at SuperSpeed; it is not clear why, but
2456 * it may become part of the power saving plan.
2457 */
2458
2459 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2460 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2461 return;
2462
2463 /*
2464 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2465 * each time on Connect Done.
2466 */
2467 if (!usb30_clock)
2468 return;
2469
2470 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2471 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2472 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2473 }
2474
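/*
 * Connect Done handler: read the negotiated speed from DSTS, program ep0's
 * wMaxPacketSize and the gadget speed accordingly, set up USB2 LPM where
 * applicable and re-enable both directions of the default control endpoint.
 */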
2475 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2476 {
2477 struct dwc3_ep *dep;
2478 int ret;
2479 u32 reg;
2480 u8 speed;
2481
2482 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2483 speed = reg & DWC3_DSTS_CONNECTSPD;
2484 dwc->speed = speed;
2485
2486 dwc3_update_ram_clk_sel(dwc, speed);
2487
2488 switch (speed) {
2489 case DWC3_DSTS_SUPERSPEED_PLUS:
2490 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2491 dwc->gadget.ep0->maxpacket = 512;
2492 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2493 break;
2494 case DWC3_DSTS_SUPERSPEED:
2495 /*
2496 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2497 * would cause a missing USB3 Reset event.
2498 *
2499 * In such situations, we should force a USB3 Reset
2500 * event by calling our dwc3_gadget_reset_interrupt()
2501 * routine.
2502 *
2503 * Refers to:
2504 *
2505 * STAR#9000483510: RTL: SS : USB3 reset event may
2506 * not be generated always when the link enters poll
2507 */
2508 if (dwc->revision < DWC3_REVISION_190A)
2509 dwc3_gadget_reset_interrupt(dwc);
2510
2511 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2512 dwc->gadget.ep0->maxpacket = 512;
2513 dwc->gadget.speed = USB_SPEED_SUPER;
2514 break;
2515 case DWC3_DSTS_HIGHSPEED:
2516 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2517 dwc->gadget.ep0->maxpacket = 64;
2518 dwc->gadget.speed = USB_SPEED_HIGH;
2519 break;
2520 case DWC3_DSTS_FULLSPEED2:
2521 case DWC3_DSTS_FULLSPEED1:
2522 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2523 dwc->gadget.ep0->maxpacket = 64;
2524 dwc->gadget.speed = USB_SPEED_FULL;
2525 break;
2526 case DWC3_DSTS_LOWSPEED:
2527 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2528 dwc->gadget.ep0->maxpacket = 8;
2529 dwc->gadget.speed = USB_SPEED_LOW;
2530 break;
2531 }
2532
2533 /* Enable USB2 LPM Capability */
2534
2535 if ((dwc->revision > DWC3_REVISION_194A) &&
2536 (speed != DWC3_DSTS_SUPERSPEED) &&
2537 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2538 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2539 reg |= DWC3_DCFG_LPM_CAP;
2540 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2541
2542 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2543 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2544
2545 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2546
2547 /*
2548 * When dwc3 revisions >= 2.40a, the LPM Erratum is enabled, and
2549 * DCFG.LPMCap is set, the core responds with an ACK if the
2550 * BESL value in the LPM token is less than or equal to the LPM
2551 * NYET threshold.
2552 */
2553 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2554 && dwc->has_lpm_erratum,
2555 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2556
2557 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2558 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2559
2560 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2561 } else {
2562 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2563 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2564 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2565 }
2566
2567 dep = dwc->eps[0];
2568 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2569 false);
2570 if (ret) {
2571 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2572 return;
2573 }
2574
2575 dep = dwc->eps[1];
2576 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2577 false);
2578 if (ret) {
2579 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2580 return;
2581 }
2582
2583 /*
2584 * Configure PHY via GUSB3PIPECTLn if required.
2585 *
2586 * Update GTXFIFOSIZn
2587 *
2588 * In both cases reset values should be sufficient.
2589 */
2590 }
2591
2592 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2593 {
2594 /*
2595 * TODO take core out of low power mode when that's
2596 * implemented.
2597 */
2598
2599 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2600 spin_unlock(&dwc->lock);
2601 dwc->gadget_driver->resume(&dwc->gadget);
2602 spin_lock(&dwc->lock);
2603 }
2604 }
2605
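/*
 * Link state change handler: filter spurious U3 -> RESUME events on old
 * non-hibernation configurations, apply the 1st half of the U1/U2 -> U0
 * workaround and suspend/resume the gadget driver as the link requires.
 */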
2606 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2607 unsigned int evtinfo)
2608 {
2609 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2610 unsigned int pwropt;
2611
2612 /*
2613 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2614 * Hibernation mode enabled which would show up when device detects
2615 * host-initiated U3 exit.
2616 *
2617 * In that case, device will generate a Link State Change Interrupt
2618 * from U3 to RESUME which is only necessary if Hibernation is
2619 * configured in.
2620 *
2621 * There are no functional changes due to such spurious event and we
2622 * just need to ignore it.
2623 *
2624 * Refers to:
2625 *
2626 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2627 * operational mode
2628 */
2629 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2630 if ((dwc->revision < DWC3_REVISION_250A) &&
2631 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2632 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2633 (next == DWC3_LINK_STATE_RESUME)) {
2634 dwc3_trace(trace_dwc3_gadget,
2635 "ignoring transition U3 -> Resume");
2636 return;
2637 }
2638 }
2639
2640 /*
2641 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2642 * on the link partner, the USB session might do multiple entry/exit
2643 * of low power states before a transfer takes place.
2644 *
2645 * Due to this problem, we might experience lower throughput. The
2646 * suggested workaround is to disable DCTL[12:9] bits if we're
2647 * transitioning from U1/U2 to U0 and enable those bits again
2648 * after a transfer completes and there are no pending transfers
2649 * on any of the enabled endpoints.
2650 *
2651 * This is the first half of that workaround.
2652 *
2653 * Refers to:
2654 *
2655 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2656 * core send LGO_Ux entering U0
2657 */
2658 if (dwc->revision < DWC3_REVISION_183A) {
2659 if (next == DWC3_LINK_STATE_U0) {
2660 u32 u1u2;
2661 u32 reg;
2662
2663 switch (dwc->link_state) {
2664 case DWC3_LINK_STATE_U1:
2665 case DWC3_LINK_STATE_U2:
2666 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2667 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2668 | DWC3_DCTL_ACCEPTU2ENA
2669 | DWC3_DCTL_INITU1ENA
2670 | DWC3_DCTL_ACCEPTU1ENA);
2671
2672 if (!dwc->u1u2)
2673 dwc->u1u2 = reg & u1u2;
2674
2675 reg &= ~u1u2;
2676
2677 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2678 break;
2679 default:
2680 /* do nothing */
2681 break;
2682 }
2683 }
2684 }
2685
2686 switch (next) {
2687 case DWC3_LINK_STATE_U1:
2688 if (dwc->speed == USB_SPEED_SUPER)
2689 dwc3_suspend_gadget(dwc);
2690 break;
2691 case DWC3_LINK_STATE_U2:
2692 case DWC3_LINK_STATE_U3:
2693 dwc3_suspend_gadget(dwc);
2694 break;
2695 case DWC3_LINK_STATE_RESUME:
2696 dwc3_resume_gadget(dwc);
2697 break;
2698 default:
2699 /* do nothing */
2700 break;
2701 }
2702
2703 dwc->link_state = next;
2704 }
2705
2706 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
2707 unsigned int evtinfo)
2708 {
2709 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2710
2711 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
2712 dwc3_suspend_gadget(dwc);
2713
2714 dwc->link_state = next;
2715 }
2716
2717 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2718 unsigned int evtinfo)
2719 {
2720 unsigned int is_ss = evtinfo & BIT(4);
2721
2722 /*
2723 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2724 * has a known issue which can cause USB CV TD.9.23 to fail
2725 * randomly.
2726 *
2727 * Because of this issue, core could generate bogus hibernation
2728 * events which SW needs to ignore.
2729 *
2730 * Refers to:
2731 *
2732 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2733 * Device Fallback from SuperSpeed
2734 */
2735 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2736 return;
2737
2738 /* enter hibernation here */
2739 }
2740
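/*
 * Dispatch a device-specific event to the matching handler.
 */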
2741 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2742 const struct dwc3_event_devt *event)
2743 {
2744 switch (event->type) {
2745 case DWC3_DEVICE_EVENT_DISCONNECT:
2746 dwc3_gadget_disconnect_interrupt(dwc);
2747 break;
2748 case DWC3_DEVICE_EVENT_RESET:
2749 dwc3_gadget_reset_interrupt(dwc);
2750 break;
2751 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2752 dwc3_gadget_conndone_interrupt(dwc);
2753 break;
2754 case DWC3_DEVICE_EVENT_WAKEUP:
2755 dwc3_gadget_wakeup_interrupt(dwc);
2756 break;
2757 case DWC3_DEVICE_EVENT_HIBER_REQ:
2758 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2759 "unexpected hibernation event\n"))
2760 break;
2761
2762 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2763 break;
2764 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2765 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2766 break;
2767 case DWC3_DEVICE_EVENT_EOPF:
2768 /* This became the Suspend event for version 2.30a and above */
2769 if (dwc->revision < DWC3_REVISION_230A) {
2770 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2771 } else {
2772 dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
2773
2774 /*
2775 * Ignore suspend event until the gadget enters into
2776 * USB_STATE_CONFIGURED state.
2777 */
2778 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
2779 dwc3_gadget_suspend_interrupt(dwc,
2780 event->event_info);
2781 }
2782 break;
2783 case DWC3_DEVICE_EVENT_SOF:
2784 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2785 break;
2786 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2787 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2788 break;
2789 case DWC3_DEVICE_EVENT_CMD_CMPL:
2790 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2791 break;
2792 case DWC3_DEVICE_EVENT_OVERFLOW:
2793 dwc3_trace(trace_dwc3_gadget, "Overflow");
2794 break;
2795 default:
2796 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2797 }
2798 }
2799
2800 static void dwc3_process_event_entry(struct dwc3 *dwc,
2801 const union dwc3_event *event)
2802 {
2803 trace_dwc3_event(event->raw);
2804
2805 /* Endpoint IRQ, handle it and return early */
2806 if (event->type.is_devspec == 0) {
2807 /* depevt */
2808 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2809 }
2810
2811 switch (event->type.type) {
2812 case DWC3_EVENT_TYPE_DEV:
2813 dwc3_gadget_interrupt(dwc, &event->devt);
2814 break;
2815 /* REVISIT what to do with Carkit and I2C events ? */
2816 default:
2817 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2818 }
2819 }
2820
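/*
 * Drain the event buffer from the threaded interrupt handler: process each
 * 4-byte event, acknowledge it in GEVNTCOUNT and unmask the interrupt once
 * the buffer is empty.
 */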
2821 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2822 {
2823 struct dwc3 *dwc = evt->dwc;
2824 irqreturn_t ret = IRQ_NONE;
2825 int left;
2826 u32 reg;
2827
2828 left = evt->count;
2829
2830 if (!(evt->flags & DWC3_EVENT_PENDING))
2831 return IRQ_NONE;
2832
2833 while (left > 0) {
2834 union dwc3_event event;
2835
2836 event.raw = *(u32 *) (evt->buf + evt->lpos);
2837
2838 dwc3_process_event_entry(dwc, &event);
2839
2840 /*
2841 * FIXME: we wrap around correctly to the next entry as
2842 * almost all entries are 4 bytes in size. There is one
2843 * entry which has 12 bytes: a regular entry followed by
2844 * 8 bytes of data. At the moment it is not clear how such
2845 * an entry is laid out if it lands next to the buffer
2846 * boundary, so that case will have to be worked out once
2847 * we try to handle it.
2848 */
2849 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2850 left -= 4;
2851
2852 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2853 }
2854
2855 evt->count = 0;
2856 evt->flags &= ~DWC3_EVENT_PENDING;
2857 ret = IRQ_HANDLED;
2858
2859 /* Unmask interrupt */
2860 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2861 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2862 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2863
2864 return ret;
2865 }
2866
2867 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2868 {
2869 struct dwc3_event_buffer *evt = _evt;
2870 struct dwc3 *dwc = evt->dwc;
2871 unsigned long flags;
2872 irqreturn_t ret = IRQ_NONE;
2873
2874 spin_lock_irqsave(&dwc->lock, flags);
2875 ret = dwc3_process_event_buf(evt);
2876 spin_unlock_irqrestore(&dwc->lock, flags);
2877
2878 return ret;
2879 }
2880
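/*
 * Hard IRQ half of the handler: if the controller is runtime suspended,
 * record that events are pending and wake it up; otherwise latch the event
 * count, mask the interrupt and ask for the threaded handler to run.
 */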
2881 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2882 {
2883 struct dwc3 *dwc = evt->dwc;
2884 u32 count;
2885 u32 reg;
2886
2887 if (pm_runtime_suspended(dwc->dev)) {
2888 pm_runtime_get(dwc->dev);
2889 disable_irq_nosync(dwc->irq_gadget);
2890 dwc->pending_events = true;
2891 return IRQ_HANDLED;
2892 }
2893
2894 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2895 count &= DWC3_GEVNTCOUNT_MASK;
2896 if (!count)
2897 return IRQ_NONE;
2898
2899 evt->count = count;
2900 evt->flags |= DWC3_EVENT_PENDING;
2901
2902 /* Mask interrupt */
2903 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2904 reg |= DWC3_GEVNTSIZ_INTMASK;
2905 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2906
2907 return IRQ_WAKE_THREAD;
2908 }
2909
2910 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2911 {
2912 struct dwc3_event_buffer *evt = _evt;
2913
2914 return dwc3_check_event_buf(evt);
2915 }
2916
2917 /**
2918 * dwc3_gadget_init - Initializes gadget related registers
2919 * @dwc: pointer to our controller context structure
2920 *
2921 * Returns 0 on success otherwise negative errno.
2922 */
2923 int dwc3_gadget_init(struct dwc3 *dwc)
2924 {
2925 int ret, irq;
2926 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
2927
2928 irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
2929 if (irq == -EPROBE_DEFER)
2930 return irq;
2931
2932 if (irq <= 0) {
2933 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
2934 if (irq == -EPROBE_DEFER)
2935 return irq;
2936
2937 if (irq <= 0) {
2938 irq = platform_get_irq(dwc3_pdev, 0);
2939 if (irq <= 0) {
2940 if (irq != -EPROBE_DEFER) {
2941 dev_err(dwc->dev,
2942 "missing peripheral IRQ\n");
2943 }
2944 if (!irq)
2945 irq = -EINVAL;
2946 return irq;
2947 }
2948 }
2949 }
2950
2951 dwc->irq_gadget = irq;
2952
2953 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2954 &dwc->ctrl_req_addr, GFP_KERNEL);
2955 if (!dwc->ctrl_req) {
2956 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2957 ret = -ENOMEM;
2958 goto err0;
2959 }
2960
2961 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2962 &dwc->ep0_trb_addr, GFP_KERNEL);
2963 if (!dwc->ep0_trb) {
2964 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2965 ret = -ENOMEM;
2966 goto err1;
2967 }
2968
2969 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2970 if (!dwc->setup_buf) {
2971 ret = -ENOMEM;
2972 goto err2;
2973 }
2974
2975 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2976 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2977 GFP_KERNEL);
2978 if (!dwc->ep0_bounce) {
2979 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2980 ret = -ENOMEM;
2981 goto err3;
2982 }
2983
2984 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2985 if (!dwc->zlp_buf) {
2986 ret = -ENOMEM;
2987 goto err4;
2988 }
2989
2990 dwc->gadget.ops = &dwc3_gadget_ops;
2991 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2992 dwc->gadget.sg_supported = true;
2993 dwc->gadget.name = "dwc3-gadget";
2994 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
2995
2996 /*
2997 * FIXME We might be setting max_speed to <SUPER, however versions
2998 * <2.20a of dwc3 have an issue with metastability (documented
2999 * elsewhere in this driver) which tells us we can't set max speed to
3000 * anything lower than SUPER.
3001 *
3002 * Because gadget.max_speed is only used by composite.c and function
3003 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
3004 * to happen so we avoid sending SuperSpeed Capability descriptor
3005 * together with our BOS descriptor as that could confuse host into
3006 * thinking we can handle super speed.
3007 *
3008 * Note that, in fact, we won't even support GetBOS requests when speed
3009 * is less than super speed because we don't have means, yet, to tell
3010 * composite.c that we are USB 2.0 + LPM ECN.
3011 */
3012 if (dwc->revision < DWC3_REVISION_220A)
3013 dwc3_trace(trace_dwc3_gadget,
3014 "Changing max_speed on rev %08x",
3015 dwc->revision);
3016
3017 dwc->gadget.max_speed = dwc->maximum_speed;
3018
3019 /*
3020 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
3021 * on ep out.
3022 */
3023 dwc->gadget.quirk_ep_out_aligned_size = true;
3024
3025 /*
3026 * REVISIT: Here we should clear all pending IRQs to be
3027 * sure we're starting from a well known location.
3028 */
3029
3030 ret = dwc3_gadget_init_endpoints(dwc);
3031 if (ret)
3032 goto err5;
3033
3034 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
3035 if (ret) {
3036 dev_err(dwc->dev, "failed to register udc\n");
3037 goto err5;
3038 }
3039
3040 return 0;
3041
3042 err5:
3043 kfree(dwc->zlp_buf);
3044
3045 err4:
3046 dwc3_gadget_free_endpoints(dwc);
3047 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3048 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3049
3050 err3:
3051 kfree(dwc->setup_buf);
3052
3053 err2:
3054 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3055 dwc->ep0_trb, dwc->ep0_trb_addr);
3056
3057 err1:
3058 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3059 dwc->ctrl_req, dwc->ctrl_req_addr);
3060
3061 err0:
3062 return ret;
3063 }
3064
3065 /* -------------------------------------------------------------------------- */
3066
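/**
 * dwc3_gadget_exit - Undoes dwc3_gadget_init
 * @dwc: pointer to our controller context structure
 *
 * Removes the UDC, frees all endpoints and releases the buffers allocated
 * during dwc3_gadget_init().
 */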
3067 void dwc3_gadget_exit(struct dwc3 *dwc)
3068 {
3069 usb_del_gadget_udc(&dwc->gadget);
3070
3071 dwc3_gadget_free_endpoints(dwc);
3072
3073 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3074 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3075
3076 kfree(dwc->setup_buf);
3077 kfree(dwc->zlp_buf);
3078
3079 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3080 dwc->ep0_trb, dwc->ep0_trb_addr);
3081
3082 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3083 dwc->ctrl_req, dwc->ctrl_req_addr);
3084 }
3085
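/*
 * PM helpers: stop the controller and notify the gadget driver on suspend,
 * restart it and re-arm Run/Stop on resume.
 */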
3086 int dwc3_gadget_suspend(struct dwc3 *dwc)
3087 {
3088 int ret;
3089
3090 if (!dwc->gadget_driver)
3091 return 0;
3092
3093 ret = dwc3_gadget_run_stop(dwc, false, false);
3094 if (ret < 0)
3095 return ret;
3096
3097 dwc3_disconnect_gadget(dwc);
3098 __dwc3_gadget_stop(dwc);
3099
3100 return 0;
3101 }
3102
3103 int dwc3_gadget_resume(struct dwc3 *dwc)
3104 {
3105 int ret;
3106
3107 if (!dwc->gadget_driver)
3108 return 0;
3109
3110 ret = __dwc3_gadget_start(dwc);
3111 if (ret < 0)
3112 goto err0;
3113
3114 ret = dwc3_gadget_run_stop(dwc, true, false);
3115 if (ret < 0)
3116 goto err1;
3117
3118 return 0;
3119
3120 err1:
3121 __dwc3_gadget_stop(dwc);
3122
3123 err0:
3124 return ret;
3125 }
3126
3127 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3128 {
3129 if (dwc->pending_events) {
3130 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3131 dwc->pending_events = false;
3132 enable_irq(dwc->irq_gadget);
3133 }
3134 }