1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if a wrong Test Selector
45 * is passed.
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
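
/*
 * Illustrative note (not upstream code): the selector values come from the
 * standard USB 2.0 test modes in <linux/usb/ch9.h> (TEST_J = 1, TEST_K = 2,
 * TEST_SE0_NAK = 3, TEST_PACKET = 4, TEST_FORCE_EN = 5); shifting them left
 * by one places them in the DCTL TstCtl field covered by
 * DWC3_DCTL_TSTCTRL_MASK. A minimal caller sketch, assuming the locking rule
 * stated above:
 *
 *	spin_lock_irqsave(&dwc->lock, flags);
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *	spin_unlock_irqrestore(&dwc->lock, flags);
 */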
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the link state on success (>= 0) or -ETIMEDOUT.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions.
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
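
/*
 * Usage sketch: __dwc3_gadget_wakeup() below drives the link into Recovery
 * this way to signal remote wakeup, with dwc->lock held by the caller:
 *
 *	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
 *	if (ret < 0)
 *		dev_err(dwc->dev, "failed to put link in Recovery\n");
 */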
147
148 /**
149 * dwc3_ep_inc_trb() - Increment a TRB index.
150 * @index - Pointer to the TRB index to increment.
151 *
152 * The index should never point to the link TRB. After incrementing,
153 * if it points to the link TRB, wrap around to the beginning. The
154 * link TRB is always at the last TRB entry.
155 */
156 static void dwc3_ep_inc_trb(u8 *index)
157 {
158 (*index)++;
159 if (*index == (DWC3_TRB_NUM - 1))
160 *index = 0;
161 }
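
/*
 * Worked example (assuming, purely for illustration, DWC3_TRB_NUM == 256 so
 * the link TRB occupies slot 255):
 *
 *	*index == 10  -> 11	normal advance
 *	*index == 254 -> 0	255 would be the link TRB, so wrap to 0
 */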
162
163 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
164 {
165 dwc3_ep_inc_trb(&dep->trb_enqueue);
166 }
167
168 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
169 {
170 dwc3_ep_inc_trb(&dep->trb_dequeue);
171 }
172
173 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
174 int status)
175 {
176 struct dwc3 *dwc = dep->dwc;
177 int i;
178
179 if (req->started) {
180 i = 0;
181 do {
182 dwc3_ep_inc_deq(dep);
183 } while (++i < req->request.num_mapped_sgs);
184 req->started = false;
185 }
186 list_del(&req->list);
187 req->trb = NULL;
188
189 if (req->request.status == -EINPROGRESS)
190 req->request.status = status;
191
192 if (dwc->ep0_bounced && dep->number == 0)
193 dwc->ep0_bounced = false;
194 else
195 usb_gadget_unmap_request(&dwc->gadget, &req->request,
196 req->direction);
197
198 trace_dwc3_gadget_giveback(req);
199
200 spin_unlock(&dwc->lock);
201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
202 spin_lock(&dwc->lock);
203
204 if (dep->number > 1)
205 pm_runtime_put(dwc->dev);
206 }
207
208 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
209 {
210 u32 timeout = 500;
211 int status = 0;
212 int ret = 0;
213 u32 reg;
214
215 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
216 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
217
218 do {
219 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
220 if (!(reg & DWC3_DGCMD_CMDACT)) {
221 status = DWC3_DGCMD_STATUS(reg);
222 if (status)
223 ret = -EINVAL;
224 break;
225 }
226 } while (timeout--);
227
228 if (!timeout) {
229 ret = -ETIMEDOUT;
230 status = -ETIMEDOUT;
231 }
232
233 trace_dwc3_gadget_generic_cmd(cmd, param, status);
234
235 return ret;
236 }
237
238 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
239
240 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
241 struct dwc3_gadget_ep_cmd_params *params)
242 {
243 struct dwc3 *dwc = dep->dwc;
244 u32 timeout = 500;
245 u32 reg;
246
247 int cmd_status = 0;
248 int susphy = false;
249 int ret = -EINVAL;
250
251 /*
252 * Synopsys Databook 2.60a states, in section 6.3.2.5.[1-8], that if
253 * we're issuing an endpoint command, we must check if
254 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
255 *
256 * We will also restore the SUSPHY bit to its previous value before
257 * returning, as stated in the same section of the Synopsys databook.
258 */
259 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
260 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
261 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
262 susphy = true;
263 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
264 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
265 }
266 }
267
268 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
269 int needs_wakeup;
270
271 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
272 dwc->link_state == DWC3_LINK_STATE_U2 ||
273 dwc->link_state == DWC3_LINK_STATE_U3);
274
275 if (unlikely(needs_wakeup)) {
276 ret = __dwc3_gadget_wakeup(dwc);
277 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
278 ret);
279 }
280 }
281
282 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
283 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
284 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
285
286 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
287 do {
288 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
289 if (!(reg & DWC3_DEPCMD_CMDACT)) {
290 cmd_status = DWC3_DEPCMD_STATUS(reg);
291
292 switch (cmd_status) {
293 case 0:
294 ret = 0;
295 break;
296 case DEPEVT_TRANSFER_NO_RESOURCE:
297 ret = -EINVAL;
298 break;
299 case DEPEVT_TRANSFER_BUS_EXPIRY:
300 /*
301 * SW issues START TRANSFER command to
302 * isochronous ep with future frame interval. If
303 * future interval time has already passed when
304 * core receives the command, it will respond
305 * with an error status of 'Bus Expiry'.
306 *
307 * Instead of always returning -EINVAL, let's
308 * give a hint to the gadget driver that this is
309 * the case by returning -EAGAIN.
310 */
311 ret = -EAGAIN;
312 break;
313 default:
314 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
315 }
316
317 break;
318 }
319 } while (--timeout);
320
321 if (timeout == 0) {
322 ret = -ETIMEDOUT;
323 cmd_status = -ETIMEDOUT;
324 }
325
326 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
327
328 if (unlikely(susphy)) {
329 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
330 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
331 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
332 }
333
334 return ret;
335 }
336
337 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
338 {
339 struct dwc3 *dwc = dep->dwc;
340 struct dwc3_gadget_ep_cmd_params params;
341 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
342
343 /*
344 * As of core revision 2.60a the recommended programming model
345 * is to set the ClearPendIN bit when issuing a Clear Stall EP
346 * command for IN endpoints. This is to prevent an issue where
347 * some (non-compliant) hosts may not send ACK TPs for pending
348 * IN transfers due to a mishandled error condition. Synopsys
349 * STAR 9000614252.
350 */
351 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
352 cmd |= DWC3_DEPCMD_CLEARPENDIN;
353
354 memset(&params, 0, sizeof(params));
355
356 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
357 }
358
359 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
360 struct dwc3_trb *trb)
361 {
362 u32 offset = (char *) trb - (char *) dep->trb_pool;
363
364 return dep->trb_pool_dma + offset;
365 }
366
367 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
368 {
369 struct dwc3 *dwc = dep->dwc;
370
371 if (dep->trb_pool)
372 return 0;
373
374 dep->trb_pool = dma_alloc_coherent(dwc->dev,
375 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
376 &dep->trb_pool_dma, GFP_KERNEL);
377 if (!dep->trb_pool) {
378 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
379 dep->name);
380 return -ENOMEM;
381 }
382
383 return 0;
384 }
385
386 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
387 {
388 struct dwc3 *dwc = dep->dwc;
389
390 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
391 dep->trb_pool, dep->trb_pool_dma);
392
393 dep->trb_pool = NULL;
394 dep->trb_pool_dma = 0;
395 }
396
397 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
398
399 /**
400 * dwc3_gadget_start_config - Configure EP resources
401 * @dwc: pointer to our controller context structure
402 * @dep: endpoint that is being enabled
403 *
404 * The assignment of transfer resources cannot perfectly follow the
405 * data book due to the fact that the controller driver does not have
406 * all knowledge of the configuration in advance. It is given this
407 * information piecemeal by the composite gadget framework after every
408 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
409 * programming model in this scenario can cause errors, for two
410 * reasons:
411 *
412 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
413 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
414 * multiple interfaces.
415 *
416 * 2) The databook does not mention doing more DEPXFERCFG for a new
417 * endpoint on an alt setting (8.1.6).
418 *
419 * The following simplified method is used instead:
420 *
421 * All hardware endpoints can be assigned a transfer resource and this
422 * setting will stay persistent until either a core reset or
423 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
424 * do DEPXFERCFG for every hardware endpoint as well. We are
425 * guaranteed that there are as many transfer resources as endpoints.
426 *
427 * This function is called for each endpoint when it is being enabled
428 * but is triggered only when called for EP0-out, which always happens
429 * first, and which should only happen in one of the above conditions.
430 */
431 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
432 {
433 struct dwc3_gadget_ep_cmd_params params;
434 u32 cmd;
435 int i;
436 int ret;
437
438 if (dep->number)
439 return 0;
440
441 memset(&params, 0x00, sizeof(params));
442 cmd = DWC3_DEPCMD_DEPSTARTCFG;
443
444 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
445 if (ret)
446 return ret;
447
448 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
449 struct dwc3_ep *dep = dwc->eps[i];
450
451 if (!dep)
452 continue;
453
454 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
455 if (ret)
456 return ret;
457 }
458
459 return 0;
460 }
461
462 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
463 const struct usb_endpoint_descriptor *desc,
464 const struct usb_ss_ep_comp_descriptor *comp_desc,
465 bool modify, bool restore)
466 {
467 struct dwc3_gadget_ep_cmd_params params;
468
469 if (dev_WARN_ONCE(dwc->dev, modify && restore,
470 "Can't modify and restore\n"))
471 return -EINVAL;
472
473 memset(&params, 0x00, sizeof(params));
474
475 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
476 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
477
478 /* Burst size is only needed in SuperSpeed mode */
479 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
480 u32 burst = dep->endpoint.maxburst;
481 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
482 }
483
484 if (modify) {
485 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
486 } else if (restore) {
487 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
488 params.param2 |= dep->saved_state;
489 } else {
490 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
491 }
492
493 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
494
495 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
496 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
497
498 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
499 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
500 | DWC3_DEPCFG_STREAM_EVENT_EN;
501 dep->stream_capable = true;
502 }
503
504 if (!usb_endpoint_xfer_control(desc))
505 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
506
507 /*
508 * We are doing 1:1 mapping for endpoints, meaning
509 * Physical Endpoint 2 maps to Logical Endpoint 2 and
510 * so on. We consider the direction bit as part of the physical
511 * endpoint number. So USB endpoint 0x81 maps to physical endpoint 3.
512 */
513 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
514
515 /*
516 * We must use the lower 16 TX FIFOs even though
517 * HW might have more
518 */
519 if (dep->direction)
520 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
521
522 if (desc->bInterval) {
523 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
524 dep->interval = 1 << (desc->bInterval - 1);
525 }
526
527 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
528 }
529
530 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
531 {
532 struct dwc3_gadget_ep_cmd_params params;
533
534 memset(&params, 0x00, sizeof(params));
535
536 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
537
538 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
539 &params);
540 }
541
542 /**
543 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
544 * @dep: endpoint to be initialized
545 * @desc: USB Endpoint Descriptor
546 *
547 * Caller should take care of locking
548 */
549 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
550 const struct usb_endpoint_descriptor *desc,
551 const struct usb_ss_ep_comp_descriptor *comp_desc,
552 bool modify, bool restore)
553 {
554 struct dwc3 *dwc = dep->dwc;
555 u32 reg;
556 int ret;
557
558 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
559
560 if (!(dep->flags & DWC3_EP_ENABLED)) {
561 ret = dwc3_gadget_start_config(dwc, dep);
562 if (ret)
563 return ret;
564 }
565
566 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
567 restore);
568 if (ret)
569 return ret;
570
571 if (!(dep->flags & DWC3_EP_ENABLED)) {
572 struct dwc3_trb *trb_st_hw;
573 struct dwc3_trb *trb_link;
574
575 dep->endpoint.desc = desc;
576 dep->comp_desc = comp_desc;
577 dep->type = usb_endpoint_type(desc);
578 dep->flags |= DWC3_EP_ENABLED;
579
580 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
581 reg |= DWC3_DALEPENA_EP(dep->number);
582 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
583
584 if (usb_endpoint_xfer_control(desc))
585 return 0;
586
587 /* Initialize the TRB ring */
588 dep->trb_dequeue = 0;
589 dep->trb_enqueue = 0;
590 memset(dep->trb_pool, 0,
591 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
592
593 /* Link TRB. The HWO bit is never reset */
594 trb_st_hw = &dep->trb_pool[0];
595
596 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
597 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
598 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
599 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
600 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
601 }
602
603 return 0;
604 }
605
606 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
607 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
608 {
609 struct dwc3_request *req;
610
611 dwc3_stop_active_transfer(dwc, dep->number, true);
612
613 /* give back all requests to the gadget driver */
614 while (!list_empty(&dep->started_list)) {
615 req = next_request(&dep->started_list);
616
617 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
618 }
619
620 while (!list_empty(&dep->pending_list)) {
621 req = next_request(&dep->pending_list);
622
623 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
624 }
625 }
626
627 /**
628 * __dwc3_gadget_ep_disable - Disables a HW endpoint
629 * @dep: the endpoint to disable
630 *
631 * This function also removes requests which are currently processed by the
632 * hardware and those which are not yet scheduled.
633 * Caller should take care of locking.
634 */
635 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
636 {
637 struct dwc3 *dwc = dep->dwc;
638 u32 reg;
639
640 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
641
642 dwc3_remove_requests(dwc, dep);
643
644 /* make sure HW endpoint isn't stalled */
645 if (dep->flags & DWC3_EP_STALL)
646 __dwc3_gadget_ep_set_halt(dep, 0, false);
647
648 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
649 reg &= ~DWC3_DALEPENA_EP(dep->number);
650 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
651
652 dep->stream_capable = false;
653 dep->endpoint.desc = NULL;
654 dep->comp_desc = NULL;
655 dep->type = 0;
656 dep->flags = 0;
657
658 return 0;
659 }
660
661 /* -------------------------------------------------------------------------- */
662
663 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
664 const struct usb_endpoint_descriptor *desc)
665 {
666 return -EINVAL;
667 }
668
669 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
670 {
671 return -EINVAL;
672 }
673
674 /* -------------------------------------------------------------------------- */
675
676 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
677 const struct usb_endpoint_descriptor *desc)
678 {
679 struct dwc3_ep *dep;
680 struct dwc3 *dwc;
681 unsigned long flags;
682 int ret;
683
684 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
685 pr_debug("dwc3: invalid parameters\n");
686 return -EINVAL;
687 }
688
689 if (!desc->wMaxPacketSize) {
690 pr_debug("dwc3: missing wMaxPacketSize\n");
691 return -EINVAL;
692 }
693
694 dep = to_dwc3_ep(ep);
695 dwc = dep->dwc;
696
697 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
698 "%s is already enabled\n",
699 dep->name))
700 return 0;
701
702 spin_lock_irqsave(&dwc->lock, flags);
703 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
704 spin_unlock_irqrestore(&dwc->lock, flags);
705
706 return ret;
707 }
708
709 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
710 {
711 struct dwc3_ep *dep;
712 struct dwc3 *dwc;
713 unsigned long flags;
714 int ret;
715
716 if (!ep) {
717 pr_debug("dwc3: invalid parameters\n");
718 return -EINVAL;
719 }
720
721 dep = to_dwc3_ep(ep);
722 dwc = dep->dwc;
723
724 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
725 "%s is already disabled\n",
726 dep->name))
727 return 0;
728
729 spin_lock_irqsave(&dwc->lock, flags);
730 ret = __dwc3_gadget_ep_disable(dep);
731 spin_unlock_irqrestore(&dwc->lock, flags);
732
733 return ret;
734 }
735
736 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
737 gfp_t gfp_flags)
738 {
739 struct dwc3_request *req;
740 struct dwc3_ep *dep = to_dwc3_ep(ep);
741
742 req = kzalloc(sizeof(*req), gfp_flags);
743 if (!req)
744 return NULL;
745
746 req->epnum = dep->number;
747 req->dep = dep;
748
749 dep->allocated_requests++;
750
751 trace_dwc3_alloc_request(req);
752
753 return &req->request;
754 }
755
756 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
757 struct usb_request *request)
758 {
759 struct dwc3_request *req = to_dwc3_request(request);
760 struct dwc3_ep *dep = to_dwc3_ep(ep);
761
762 dep->allocated_requests--;
763 trace_dwc3_free_request(req);
764 kfree(req);
765 }
766
767 /**
768 * dwc3_prepare_one_trb - setup one TRB from one request
769 * @dep: endpoint for which this request is prepared
770 * @req: dwc3_request pointer
771 */
772 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
773 struct dwc3_request *req, dma_addr_t dma,
774 unsigned length, unsigned last, unsigned chain, unsigned node)
775 {
776 struct dwc3_trb *trb;
777
778 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
779 dep->name, req, (unsigned long long) dma,
780 length, last ? " last" : "",
781 chain ? " chain" : "");
782
783
784 trb = &dep->trb_pool[dep->trb_enqueue];
785
786 if (!req->trb) {
787 dwc3_gadget_move_started_request(req);
788 req->trb = trb;
789 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
790 req->first_trb_index = dep->trb_enqueue;
791 }
792
793 dwc3_ep_inc_enq(dep);
794
795 trb->size = DWC3_TRB_SIZE_LENGTH(length);
796 trb->bpl = lower_32_bits(dma);
797 trb->bph = upper_32_bits(dma);
798
799 switch (usb_endpoint_type(dep->endpoint.desc)) {
800 case USB_ENDPOINT_XFER_CONTROL:
801 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
802 break;
803
804 case USB_ENDPOINT_XFER_ISOC:
805 if (!node)
806 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
807 else
808 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
809
810 /* always enable Interrupt on Missed ISOC */
811 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
812 break;
813
814 case USB_ENDPOINT_XFER_BULK:
815 case USB_ENDPOINT_XFER_INT:
816 trb->ctrl = DWC3_TRBCTL_NORMAL;
817 break;
818 default:
819 /*
820 * This is only possible with faulty memory because we
821 * checked it already :)
822 */
823 BUG();
824 }
825
826 /* always enable Continue on Short Packet */
827 trb->ctrl |= DWC3_TRB_CTRL_CSP;
828
829 if (!req->request.no_interrupt && !chain)
830 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
831
832 if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
833 trb->ctrl |= DWC3_TRB_CTRL_LST;
834
835 if (chain)
836 trb->ctrl |= DWC3_TRB_CTRL_CHN;
837
838 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
839 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
840
841 trb->ctrl |= DWC3_TRB_CTRL_HWO;
842
843 dep->queued_requests++;
844
845 trace_dwc3_prepare_trb(dep, trb);
846 }
847
848 /**
849 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
850 * @dep: The endpoint with the TRB ring
851 * @index: The index of the current TRB in the ring
852 *
853 * Returns the TRB prior to the one pointed to by the index. If the
854 * index is 0, we will wrap backwards, skip the link TRB, and return
855 * the one just before that.
856 */
857 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
858 {
859 if (!index)
860 index = DWC3_TRB_NUM - 2;
861 else
862 index = index - 1;
863
864 return &dep->trb_pool[index];
865 }
866
867 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
868 {
869 struct dwc3_trb *tmp;
870 u8 trbs_left;
871
872 /*
873 * If enqueue & dequeue are equal then it is either full or empty.
874 *
875 * One way to know for sure is if the TRB right before us has HWO bit
876 * set or not. If it has, then we're definitely full and can't fit any
877 * more transfers in our ring.
878 */
879 if (dep->trb_enqueue == dep->trb_dequeue) {
880 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
881 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
882 return 0;
883
884 return DWC3_TRB_NUM - 1;
885 }
886
887 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
888 trbs_left &= (DWC3_TRB_NUM - 1);
889
890 if (dep->trb_dequeue < dep->trb_enqueue)
891 trbs_left--;
892
893 return trbs_left;
894 }
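
/*
 * Worked example, again assuming DWC3_TRB_NUM == 256 for illustration:
 *
 *	enqueue == dequeue, previous TRB has HWO set	-> 0 free (ring full)
 *	enqueue == dequeue, previous TRB has HWO clear	-> 255 free (ring empty)
 *	enqueue == 5, dequeue == 2			-> (2 - 5) & 255 = 253,
 *							   minus 1 -> 252 free
 */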
895
896 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
897 struct dwc3_request *req, unsigned int trbs_left,
898 unsigned int more_coming)
899 {
900 struct usb_request *request = &req->request;
901 struct scatterlist *sg = request->sg;
902 struct scatterlist *s;
903 unsigned int last = false;
904 unsigned int length;
905 dma_addr_t dma;
906 int i;
907
908 for_each_sg(sg, s, request->num_mapped_sgs, i) {
909 unsigned chain = true;
910
911 length = sg_dma_len(s);
912 dma = sg_dma_address(s);
913
914 if (sg_is_last(s)) {
915 if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
916 !more_coming)
917 last = true;
918
919 chain = false;
920 }
921
922 if (!trbs_left--)
923 last = true;
924
925 if (last)
926 chain = false;
927
928 dwc3_prepare_one_trb(dep, req, dma, length,
929 last, chain, i);
930
931 if (last)
932 break;
933 }
934 }
935
936 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
937 struct dwc3_request *req, unsigned int trbs_left,
938 unsigned int more_coming)
939 {
940 unsigned int last = false;
941 unsigned int length;
942 dma_addr_t dma;
943
944 dma = req->request.dma;
945 length = req->request.length;
946
947 if (!trbs_left)
948 last = true;
949
950 /* Is this the last request? */
951 if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
952 last = true;
953
954 dwc3_prepare_one_trb(dep, req, dma, length,
955 last, false, 0);
956 }
957
958 /*
959 * dwc3_prepare_trbs - setup TRBs from requests
960 * @dep: endpoint for which requests are being prepared
961 *
962 * The function goes through the requests list and sets up TRBs for the
963 * transfers. The function returns once there are no more TRBs available or
964 * it runs out of requests.
965 */
966 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
967 {
968 struct dwc3_request *req, *n;
969 unsigned int more_coming;
970 u32 trbs_left;
971
972 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
973
974 trbs_left = dwc3_calc_trbs_left(dep);
975 if (!trbs_left)
976 return;
977
978 more_coming = dep->allocated_requests - dep->queued_requests;
979
980 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
981 if (req->request.num_mapped_sgs > 0)
982 dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
983 more_coming);
984 else
985 dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
986 more_coming);
987
988 if (!trbs_left)
989 return;
990 }
991 }
992
993 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
994 {
995 struct dwc3_gadget_ep_cmd_params params;
996 struct dwc3_request *req;
997 struct dwc3 *dwc = dep->dwc;
998 int starting;
999 int ret;
1000 u32 cmd;
1001
1002 starting = !(dep->flags & DWC3_EP_BUSY);
1003
1004 dwc3_prepare_trbs(dep);
1005 req = next_request(&dep->started_list);
1006 if (!req) {
1007 dep->flags |= DWC3_EP_PENDING_REQUEST;
1008 return 0;
1009 }
1010
1011 memset(&params, 0, sizeof(params));
1012
1013 if (starting) {
1014 params.param0 = upper_32_bits(req->trb_dma);
1015 params.param1 = lower_32_bits(req->trb_dma);
1016 cmd = DWC3_DEPCMD_STARTTRANSFER |
1017 DWC3_DEPCMD_PARAM(cmd_param);
1018 } else {
1019 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1020 DWC3_DEPCMD_PARAM(dep->resource_index);
1021 }
1022
1023 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1024 if (ret < 0) {
1025 /*
1026 * FIXME we need to iterate over the list of requests
1027 * here and stop, unmap, free and del each of the linked
1028 * requests instead of what we do now.
1029 */
1030 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1031 req->direction);
1032 list_del(&req->list);
1033 return ret;
1034 }
1035
1036 dep->flags |= DWC3_EP_BUSY;
1037
1038 if (starting) {
1039 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1040 WARN_ON_ONCE(!dep->resource_index);
1041 }
1042
1043 return 0;
1044 }
1045
1046 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1047 struct dwc3_ep *dep, u32 cur_uf)
1048 {
1049 u32 uf;
1050
1051 if (list_empty(&dep->pending_list)) {
1052 dwc3_trace(trace_dwc3_gadget,
1053 "ISOC ep %s run out for requests",
1054 dep->name);
1055 dep->flags |= DWC3_EP_PENDING_REQUEST;
1056 return;
1057 }
1058
1059 /* 4 microframes in the future */
1060 uf = cur_uf + dep->interval * 4;
1061
1062 __dwc3_gadget_kick_transfer(dep, uf);
1063 }
1064
1065 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1066 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1067 {
1068 u32 cur_uf, mask;
1069
1070 mask = ~(dep->interval - 1);
1071 cur_uf = event->parameters & mask;
1072
1073 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1074 }
1075
1076 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1077 {
1078 struct dwc3 *dwc = dep->dwc;
1079 int ret;
1080
1081 if (!dep->endpoint.desc) {
1082 dwc3_trace(trace_dwc3_gadget,
1083 "trying to queue request %p to disabled %s",
1084 &req->request, dep->endpoint.name);
1085 return -ESHUTDOWN;
1086 }
1087
1088 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1089 &req->request, req->dep->name)) {
1090 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
1091 &req->request, req->dep->name);
1092 return -EINVAL;
1093 }
1094
1095 pm_runtime_get(dwc->dev);
1096
1097 req->request.actual = 0;
1098 req->request.status = -EINPROGRESS;
1099 req->direction = dep->direction;
1100 req->epnum = dep->number;
1101
1102 trace_dwc3_ep_queue(req);
1103
1104 /*
1105 * We only add to our list of requests now and
1106 * start consuming the list once we get XferNotReady
1107 * IRQ.
1108 *
1109 * That way, we avoid doing anything that we don't need
1110 * to do now and defer it until the point we receive a
1111 * particular token from the Host side.
1112 *
1113 * This will also avoid Host cancelling URBs due to too
1114 * many NAKs.
1115 */
1116 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1117 dep->direction);
1118 if (ret)
1119 return ret;
1120
1121 list_add_tail(&req->list, &dep->pending_list);
1122
1123 /*
1124 * If there are no pending requests and the endpoint isn't already
1125 * busy, we will just start the request straight away.
1126 *
1127 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1128 * little bit faster.
1129 */
1130 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1131 !usb_endpoint_xfer_int(dep->endpoint.desc)) {
1132 ret = __dwc3_gadget_kick_transfer(dep, 0);
1133 goto out;
1134 }
1135
1136 /*
1137 * There are a few special cases:
1138 *
1139 * 1. XferNotReady with empty list of requests. We need to kick the
1140 * transfer here in that situation, otherwise we will be NAKing
1141 * forever. If we get XferNotReady before gadget driver has a
1142 * chance to queue a request, we will ACK the IRQ but won't be
1143 * able to receive the data until the next request is queued.
1144 * The following code is handling exactly that.
1145 *
1146 */
1147 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1148 /*
1149 * If xfernotready is already elapsed and it is a case
1150 * of isoc transfer, then issue END TRANSFER, so that
1151 * you can receive xfernotready again and can have
1152 * notion of current microframe.
1153 */
1154 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1155 if (list_empty(&dep->started_list)) {
1156 dwc3_stop_active_transfer(dwc, dep->number, true);
1157 dep->flags = DWC3_EP_ENABLED;
1158 }
1159 return 0;
1160 }
1161
1162 ret = __dwc3_gadget_kick_transfer(dep, 0);
1163 if (!ret)
1164 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1165
1166 goto out;
1167 }
1168
1169 /*
1170 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1171 * kick the transfer here after queuing a request, otherwise the
1172 * core may not see the modified TRB(s).
1173 */
1174 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1175 (dep->flags & DWC3_EP_BUSY) &&
1176 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1177 WARN_ON_ONCE(!dep->resource_index);
1178 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
1179 goto out;
1180 }
1181
1182 /*
1183 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1184 * right away, otherwise host will not know we have streams to be
1185 * handled.
1186 */
1187 if (dep->stream_capable)
1188 ret = __dwc3_gadget_kick_transfer(dep, 0);
1189
1190 out:
1191 if (ret && ret != -EBUSY)
1192 dwc3_trace(trace_dwc3_gadget,
1193 "%s: failed to kick transfers",
1194 dep->name);
1195 if (ret == -EBUSY)
1196 ret = 0;
1197
1198 return ret;
1199 }
1200
1201 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1202 struct usb_request *request)
1203 {
1204 dwc3_gadget_ep_free_request(ep, request);
1205 }
1206
1207 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1208 {
1209 struct dwc3_request *req;
1210 struct usb_request *request;
1211 struct usb_ep *ep = &dep->endpoint;
1212
1213 dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
1214 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1215 if (!request)
1216 return -ENOMEM;
1217
1218 request->length = 0;
1219 request->buf = dwc->zlp_buf;
1220 request->complete = __dwc3_gadget_ep_zlp_complete;
1221
1222 req = to_dwc3_request(request);
1223
1224 return __dwc3_gadget_ep_queue(dep, req);
1225 }
1226
1227 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1228 gfp_t gfp_flags)
1229 {
1230 struct dwc3_request *req = to_dwc3_request(request);
1231 struct dwc3_ep *dep = to_dwc3_ep(ep);
1232 struct dwc3 *dwc = dep->dwc;
1233
1234 unsigned long flags;
1235
1236 int ret;
1237
1238 spin_lock_irqsave(&dwc->lock, flags);
1239 ret = __dwc3_gadget_ep_queue(dep, req);
1240
1241 /*
1242 * Okay, here's the thing, if gadget driver has requested for a ZLP by
1243 * setting request->zero, instead of doing magic, we will just queue an
1244 * extra usb_request ourselves so that it gets handled the same way as
1245 * any other request.
1246 */
1247 if (ret == 0 && request->zero && request->length &&
1248 (request->length % ep->maxpacket == 0))
1249 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1250
1251 spin_unlock_irqrestore(&dwc->lock, flags);
1252
1253 return ret;
1254 }
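
/*
 * Illustrative sketch (hypothetical function-driver code, not part of this
 * file): to get the trailing ZLP described above, a gadget driver only has
 * to set request->zero before queueing a max-packet-multiple transfer:
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;		   (a multiple of ep->maxpacket)
 *	req->zero = 1;			   (ask for the trailing ZLP)
 *	req->complete = my_complete;	   (hypothetical completion handler)
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */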
1255
1256 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1257 struct usb_request *request)
1258 {
1259 struct dwc3_request *req = to_dwc3_request(request);
1260 struct dwc3_request *r = NULL;
1261
1262 struct dwc3_ep *dep = to_dwc3_ep(ep);
1263 struct dwc3 *dwc = dep->dwc;
1264
1265 unsigned long flags;
1266 int ret = 0;
1267
1268 trace_dwc3_ep_dequeue(req);
1269
1270 spin_lock_irqsave(&dwc->lock, flags);
1271
1272 list_for_each_entry(r, &dep->pending_list, list) {
1273 if (r == req)
1274 break;
1275 }
1276
1277 if (r != req) {
1278 list_for_each_entry(r, &dep->started_list, list) {
1279 if (r == req)
1280 break;
1281 }
1282 if (r == req) {
1283 /* wait until it is processed */
1284 dwc3_stop_active_transfer(dwc, dep->number, true);
1285 goto out1;
1286 }
1287 dev_err(dwc->dev, "request %p was not queued to %s\n",
1288 request, ep->name);
1289 ret = -EINVAL;
1290 goto out0;
1291 }
1292
1293 out1:
1294 /* giveback the request */
1295 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1296
1297 out0:
1298 spin_unlock_irqrestore(&dwc->lock, flags);
1299
1300 return ret;
1301 }
1302
1303 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1304 {
1305 struct dwc3_gadget_ep_cmd_params params;
1306 struct dwc3 *dwc = dep->dwc;
1307 int ret;
1308
1309 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1310 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1311 return -EINVAL;
1312 }
1313
1314 memset(&params, 0x00, sizeof(params));
1315
1316 if (value) {
1317 struct dwc3_trb *trb;
1318
1319 unsigned transfer_in_flight;
1320 unsigned started;
1321
1322 if (dep->number > 1)
1323 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1324 else
1325 trb = &dwc->ep0_trb[dep->trb_enqueue];
1326
1327 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1328 started = !list_empty(&dep->started_list);
1329
1330 if (!protocol && ((dep->direction && transfer_in_flight) ||
1331 (!dep->direction && started))) {
1332 dwc3_trace(trace_dwc3_gadget,
1333 "%s: pending request, cannot halt",
1334 dep->name);
1335 return -EAGAIN;
1336 }
1337
1338 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1339 &params);
1340 if (ret)
1341 dev_err(dwc->dev, "failed to set STALL on %s\n",
1342 dep->name);
1343 else
1344 dep->flags |= DWC3_EP_STALL;
1345 } else {
1346
1347 ret = dwc3_send_clear_stall_ep_cmd(dep);
1348 if (ret)
1349 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1350 dep->name);
1351 else
1352 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1353 }
1354
1355 return ret;
1356 }
1357
1358 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1359 {
1360 struct dwc3_ep *dep = to_dwc3_ep(ep);
1361 struct dwc3 *dwc = dep->dwc;
1362
1363 unsigned long flags;
1364
1365 int ret;
1366
1367 spin_lock_irqsave(&dwc->lock, flags);
1368 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1369 spin_unlock_irqrestore(&dwc->lock, flags);
1370
1371 return ret;
1372 }
1373
1374 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1375 {
1376 struct dwc3_ep *dep = to_dwc3_ep(ep);
1377 struct dwc3 *dwc = dep->dwc;
1378 unsigned long flags;
1379 int ret;
1380
1381 spin_lock_irqsave(&dwc->lock, flags);
1382 dep->flags |= DWC3_EP_WEDGE;
1383
1384 if (dep->number == 0 || dep->number == 1)
1385 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1386 else
1387 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1388 spin_unlock_irqrestore(&dwc->lock, flags);
1389
1390 return ret;
1391 }
1392
1393 /* -------------------------------------------------------------------------- */
1394
1395 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1396 .bLength = USB_DT_ENDPOINT_SIZE,
1397 .bDescriptorType = USB_DT_ENDPOINT,
1398 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1399 };
1400
1401 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1402 .enable = dwc3_gadget_ep0_enable,
1403 .disable = dwc3_gadget_ep0_disable,
1404 .alloc_request = dwc3_gadget_ep_alloc_request,
1405 .free_request = dwc3_gadget_ep_free_request,
1406 .queue = dwc3_gadget_ep0_queue,
1407 .dequeue = dwc3_gadget_ep_dequeue,
1408 .set_halt = dwc3_gadget_ep0_set_halt,
1409 .set_wedge = dwc3_gadget_ep_set_wedge,
1410 };
1411
1412 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1413 .enable = dwc3_gadget_ep_enable,
1414 .disable = dwc3_gadget_ep_disable,
1415 .alloc_request = dwc3_gadget_ep_alloc_request,
1416 .free_request = dwc3_gadget_ep_free_request,
1417 .queue = dwc3_gadget_ep_queue,
1418 .dequeue = dwc3_gadget_ep_dequeue,
1419 .set_halt = dwc3_gadget_ep_set_halt,
1420 .set_wedge = dwc3_gadget_ep_set_wedge,
1421 };
1422
1423 /* -------------------------------------------------------------------------- */
1424
1425 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1426 {
1427 struct dwc3 *dwc = gadget_to_dwc(g);
1428 u32 reg;
1429
1430 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1431 return DWC3_DSTS_SOFFN(reg);
1432 }
1433
1434 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1435 {
1436 int retries;
1437
1438 int ret;
1439 u32 reg;
1440
1441 u8 link_state;
1442 u8 speed;
1443
1444 /*
1445 * According to the Databook, the Remote Wakeup request should
1446 * be issued only when the device is in the Early Suspend state.
1447 *
1448 * We can check that via USB Link State bits in DSTS register.
1449 */
1450 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1451
1452 speed = reg & DWC3_DSTS_CONNECTSPD;
1453 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1454 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1455 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
1456 return 0;
1457 }
1458
1459 link_state = DWC3_DSTS_USBLNKST(reg);
1460
1461 switch (link_state) {
1462 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1463 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1464 break;
1465 default:
1466 dwc3_trace(trace_dwc3_gadget,
1467 "can't wakeup from '%s'",
1468 dwc3_gadget_link_string(link_state));
1469 return -EINVAL;
1470 }
1471
1472 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1473 if (ret < 0) {
1474 dev_err(dwc->dev, "failed to put link in Recovery\n");
1475 return ret;
1476 }
1477
1478 /* Recent versions do this automatically */
1479 if (dwc->revision < DWC3_REVISION_194A) {
1480 /* write zeroes to Link Change Request */
1481 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1482 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1483 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1484 }
1485
1486 /* poll until Link State changes to ON */
1487 retries = 20000;
1488
1489 while (retries--) {
1490 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1491
1492 /* in HS, means ON */
1493 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1494 break;
1495 }
1496
1497 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1498 dev_err(dwc->dev, "failed to send remote wakeup\n");
1499 return -EINVAL;
1500 }
1501
1502 return 0;
1503 }
1504
1505 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1506 {
1507 struct dwc3 *dwc = gadget_to_dwc(g);
1508 unsigned long flags;
1509 int ret;
1510
1511 spin_lock_irqsave(&dwc->lock, flags);
1512 ret = __dwc3_gadget_wakeup(dwc);
1513 spin_unlock_irqrestore(&dwc->lock, flags);
1514
1515 return ret;
1516 }
1517
1518 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1519 int is_selfpowered)
1520 {
1521 struct dwc3 *dwc = gadget_to_dwc(g);
1522 unsigned long flags;
1523
1524 spin_lock_irqsave(&dwc->lock, flags);
1525 g->is_selfpowered = !!is_selfpowered;
1526 spin_unlock_irqrestore(&dwc->lock, flags);
1527
1528 return 0;
1529 }
1530
1531 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1532 {
1533 u32 reg;
1534 u32 timeout = 500;
1535
1536 if (pm_runtime_suspended(dwc->dev))
1537 return 0;
1538
1539 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1540 if (is_on) {
1541 if (dwc->revision <= DWC3_REVISION_187A) {
1542 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1543 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1544 }
1545
1546 if (dwc->revision >= DWC3_REVISION_194A)
1547 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1548 reg |= DWC3_DCTL_RUN_STOP;
1549
1550 if (dwc->has_hibernation)
1551 reg |= DWC3_DCTL_KEEP_CONNECT;
1552
1553 dwc->pullups_connected = true;
1554 } else {
1555 reg &= ~DWC3_DCTL_RUN_STOP;
1556
1557 if (dwc->has_hibernation && !suspend)
1558 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1559
1560 dwc->pullups_connected = false;
1561 }
1562
1563 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1564
1565 do {
1566 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1567 reg &= DWC3_DSTS_DEVCTRLHLT;
1568 } while (--timeout && !(!is_on ^ !reg));
1569
1570 if (!timeout)
1571 return -ETIMEDOUT;
1572
1573 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1574 dwc->gadget_driver
1575 ? dwc->gadget_driver->function : "no-function",
1576 is_on ? "connect" : "disconnect");
1577
1578 return 0;
1579 }
1580
1581 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1582 {
1583 struct dwc3 *dwc = gadget_to_dwc(g);
1584 unsigned long flags;
1585 int ret;
1586
1587 is_on = !!is_on;
1588
1589 spin_lock_irqsave(&dwc->lock, flags);
1590 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1591 spin_unlock_irqrestore(&dwc->lock, flags);
1592
1593 return ret;
1594 }
1595
1596 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1597 {
1598 u32 reg;
1599
1600 /* Enable all but Start and End of Frame IRQs */
1601 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1602 DWC3_DEVTEN_EVNTOVERFLOWEN |
1603 DWC3_DEVTEN_CMDCMPLTEN |
1604 DWC3_DEVTEN_ERRTICERREN |
1605 DWC3_DEVTEN_WKUPEVTEN |
1606 DWC3_DEVTEN_ULSTCNGEN |
1607 DWC3_DEVTEN_CONNECTDONEEN |
1608 DWC3_DEVTEN_USBRSTEN |
1609 DWC3_DEVTEN_DISCONNEVTEN);
1610
1611 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1612 }
1613
1614 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1615 {
1616 /* mask all interrupts */
1617 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1618 }
1619
1620 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1621 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1622
1623 /**
1624 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1625 * @dwc: pointer to our context structure
1626 *
1627 * The following looks complex but it's actually very simple. In order to
1628 * calculate the number of packets we can burst at once on OUT transfers, we're
1629 * gonna use RxFIFO size.
1630 *
1631 * To calculate RxFIFO size we need two numbers:
1632 * MDWIDTH = size, in bits, of the internal memory bus
1633 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1634 *
1635 * Given these two numbers, the formula is simple:
1636 *
1637 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1638 *
1639 * 24 bytes is for 3x SETUP packets
1640 * 16 bytes is a clock domain crossing tolerance
1641 *
1642 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
1643 */
1644 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1645 {
1646 u32 ram2_depth;
1647 u32 mdwidth;
1648 u32 nump;
1649 u32 reg;
1650
1651 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1652 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1653
1654 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1655 nump = min_t(u32, nump, 16);
1656
1657 /* update NumP */
1658 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1659 reg &= ~DWC3_DCFG_NUMP_MASK;
1660 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1661 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1662 }
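
/*
 * Worked example with hypothetical hardware parameters, RAM2_DEPTH = 4096
 * and MDWIDTH = 64:
 *
 *	RxFIFO Size = (4096 * 64 / 8) - 24 - 16 = 32728 bytes
 *	NUMP        = 32728 / 1024 = 31, clamped by min_t() to 16
 *
 * so DCFG.NUMP is programmed to 16 for that configuration.
 */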
1663
1664 static int __dwc3_gadget_start(struct dwc3 *dwc)
1665 {
1666 struct dwc3_ep *dep;
1667 int ret = 0;
1668 u32 reg;
1669
1670 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1671 reg &= ~(DWC3_DCFG_SPEED_MASK);
1672
1673 /**
1674 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1675 * which would cause a metastability state on the Run/Stop
1676 * bit if we try to force the IP to USB2-only mode.
1677 *
1678 * Because of that, we cannot configure the IP to any
1679 * speed other than SuperSpeed.
1680 *
1681 * Refers to:
1682 *
1683 * STAR#9000525659: Clock Domain Crossing on DCTL in
1684 * USB 2.0 Mode
1685 */
1686 if (dwc->revision < DWC3_REVISION_220A) {
1687 reg |= DWC3_DCFG_SUPERSPEED;
1688 } else {
1689 switch (dwc->maximum_speed) {
1690 case USB_SPEED_LOW:
1691 reg |= DWC3_DCFG_LOWSPEED;
1692 break;
1693 case USB_SPEED_FULL:
1694 reg |= DWC3_DCFG_FULLSPEED1;
1695 break;
1696 case USB_SPEED_HIGH:
1697 reg |= DWC3_DCFG_HIGHSPEED;
1698 break;
1699 case USB_SPEED_SUPER_PLUS:
1700 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
1701 break;
1702 default:
1703 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1704 dwc->maximum_speed);
1705 /* fall through */
1706 case USB_SPEED_SUPER:
1707 reg |= DWC3_DCFG_SUPERSPEED;
1708 break;
1709 }
1710 }
1711 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1712
1713 /*
1714 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1715 * field instead of letting dwc3 itself calculate that automatically.
1716 *
1717 * This way, we maximize the chances that we'll be able to get several
1718 * bursts of data without going through any sort of endpoint throttling.
1719 */
1720 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1721 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1722 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1723
1724 dwc3_gadget_setup_nump(dwc);
1725
1726 /* Start with SuperSpeed Default */
1727 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1728
1729 dep = dwc->eps[0];
1730 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1731 false);
1732 if (ret) {
1733 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1734 goto err0;
1735 }
1736
1737 dep = dwc->eps[1];
1738 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1739 false);
1740 if (ret) {
1741 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1742 goto err1;
1743 }
1744
1745 /* begin to receive SETUP packets */
1746 dwc->ep0state = EP0_SETUP_PHASE;
1747 dwc3_ep0_out_start(dwc);
1748
1749 dwc3_gadget_enable_irq(dwc);
1750
1751 return 0;
1752
1753 err1:
1754 __dwc3_gadget_ep_disable(dwc->eps[0]);
1755
1756 err0:
1757 return ret;
1758 }
1759
1760 static int dwc3_gadget_start(struct usb_gadget *g,
1761 struct usb_gadget_driver *driver)
1762 {
1763 struct dwc3 *dwc = gadget_to_dwc(g);
1764 unsigned long flags;
1765 int ret = 0;
1766 int irq;
1767
1768 irq = dwc->irq_gadget;
1769 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1770 IRQF_SHARED, "dwc3", dwc->ev_buf);
1771 if (ret) {
1772 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1773 irq, ret);
1774 goto err0;
1775 }
1776
1777 spin_lock_irqsave(&dwc->lock, flags);
1778 if (dwc->gadget_driver) {
1779 dev_err(dwc->dev, "%s is already bound to %s\n",
1780 dwc->gadget.name,
1781 dwc->gadget_driver->driver.name);
1782 ret = -EBUSY;
1783 goto err1;
1784 }
1785
1786 dwc->gadget_driver = driver;
1787
1788 if (pm_runtime_active(dwc->dev))
1789 __dwc3_gadget_start(dwc);
1790
1791 spin_unlock_irqrestore(&dwc->lock, flags);
1792
1793 return 0;
1794
1795 err1:
1796 spin_unlock_irqrestore(&dwc->lock, flags);
1797 free_irq(irq, dwc);
1798
1799 err0:
1800 return ret;
1801 }
1802
1803 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1804 {
1805 if (pm_runtime_suspended(dwc->dev))
1806 return;
1807
1808 dwc3_gadget_disable_irq(dwc);
1809 __dwc3_gadget_ep_disable(dwc->eps[0]);
1810 __dwc3_gadget_ep_disable(dwc->eps[1]);
1811 }
1812
1813 static int dwc3_gadget_stop(struct usb_gadget *g)
1814 {
1815 struct dwc3 *dwc = gadget_to_dwc(g);
1816 unsigned long flags;
1817
1818 spin_lock_irqsave(&dwc->lock, flags);
1819 __dwc3_gadget_stop(dwc);
1820 dwc->gadget_driver = NULL;
1821 spin_unlock_irqrestore(&dwc->lock, flags);
1822
1823 free_irq(dwc->irq_gadget, dwc->ev_buf);
1824
1825 return 0;
1826 }
1827
1828 static const struct usb_gadget_ops dwc3_gadget_ops = {
1829 .get_frame = dwc3_gadget_get_frame,
1830 .wakeup = dwc3_gadget_wakeup,
1831 .set_selfpowered = dwc3_gadget_set_selfpowered,
1832 .pullup = dwc3_gadget_pullup,
1833 .udc_start = dwc3_gadget_start,
1834 .udc_stop = dwc3_gadget_stop,
1835 };
1836
1837 /* -------------------------------------------------------------------------- */
1838
1839 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1840 u8 num, u32 direction)
1841 {
1842 struct dwc3_ep *dep;
1843 u8 i;
1844
1845 for (i = 0; i < num; i++) {
1846 u8 epnum = (i << 1) | (direction ? 1 : 0);
1847
1848 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1849 if (!dep)
1850 return -ENOMEM;
1851
1852 dep->dwc = dwc;
1853 dep->number = epnum;
1854 dep->direction = !!direction;
1855 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
1856 dwc->eps[epnum] = dep;
1857
1858 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1859 (epnum & 1) ? "in" : "out");
1860
1861 dep->endpoint.name = dep->name;
1862 spin_lock_init(&dep->lock);
1863
1864 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1865
1866 if (epnum == 0 || epnum == 1) {
1867 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1868 dep->endpoint.maxburst = 1;
1869 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1870 if (!epnum)
1871 dwc->gadget.ep0 = &dep->endpoint;
1872 } else {
1873 int ret;
1874
1875 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1876 dep->endpoint.max_streams = 15;
1877 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1878 list_add_tail(&dep->endpoint.ep_list,
1879 &dwc->gadget.ep_list);
1880
1881 ret = dwc3_alloc_trb_pool(dep);
1882 if (ret)
1883 return ret;
1884 }
1885
1886 if (epnum == 0 || epnum == 1) {
1887 dep->endpoint.caps.type_control = true;
1888 } else {
1889 dep->endpoint.caps.type_iso = true;
1890 dep->endpoint.caps.type_bulk = true;
1891 dep->endpoint.caps.type_int = true;
1892 }
1893
1894 dep->endpoint.caps.dir_in = !!direction;
1895 dep->endpoint.caps.dir_out = !direction;
1896
1897 INIT_LIST_HEAD(&dep->pending_list);
1898 INIT_LIST_HEAD(&dep->started_list);
1899 }
1900
1901 return 0;
1902 }
1903
1904 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1905 {
1906 int ret;
1907
1908 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1909
1910 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1911 if (ret < 0) {
1912 dwc3_trace(trace_dwc3_gadget,
1913 "failed to allocate OUT endpoints");
1914 return ret;
1915 }
1916
1917 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1918 if (ret < 0) {
1919 dwc3_trace(trace_dwc3_gadget,
1920 "failed to allocate IN endpoints");
1921 return ret;
1922 }
1923
1924 return 0;
1925 }
1926
1927 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1928 {
1929 struct dwc3_ep *dep;
1930 u8 epnum;
1931
1932 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1933 dep = dwc->eps[epnum];
1934 if (!dep)
1935 continue;
1936 /*
1937 * Physical endpoints 0 and 1 are special; they form the
1938 * bi-directional USB endpoint 0.
1939 *
1940 * For those two physical endpoints, we don't allocate a TRB
1941 * pool nor do we add them to the endpoints list. Due to that, we
1942 * shouldn't do these two operations here; otherwise we would end
1943 * up with all sorts of bugs when removing dwc3.ko.
1944 */
1945 if (epnum != 0 && epnum != 1) {
1946 dwc3_free_trb_pool(dep);
1947 list_del(&dep->endpoint.ep_list);
1948 }
1949
1950 kfree(dep);
1951 }
1952 }
1953
1954 /* -------------------------------------------------------------------------- */
1955
1956 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1957 struct dwc3_request *req, struct dwc3_trb *trb,
1958 const struct dwc3_event_depevt *event, int status,
1959 int chain)
1960 {
1961 unsigned int count;
1962 unsigned int s_pkt = 0;
1963 unsigned int trb_status;
1964
1965 dep->queued_requests--;
1966 trace_dwc3_complete_trb(dep, trb);
1967
1968 /*
1969 * If we're in the middle of a series of chained TRBs and we
1970 * receive a short transfer along the way, DWC3 will skip
1971 * through all TRBs including the last TRB in the chain (the
1972 * one where the CHN bit is zero). DWC3 will also avoid clearing
1973 * the HWO bit, so SW has to do it manually.
1974 *
1975 * We're going to do that here to avoid problems of HW trying
1976 * to use bogus TRBs for transfers.
1977 */
1978 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
1979 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1980
1981 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1982 return 1;
1983
1984 count = trb->size & DWC3_TRB_SIZE_MASK;
1985
1986 if (dep->direction) {
1987 if (count) {
1988 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1989 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1990 dwc3_trace(trace_dwc3_gadget,
1991 "%s: incomplete IN transfer",
1992 dep->name);
1993 /*
1994 * If a missed isoc occurred and there is
1995 * no request queued, then issue END
1996 * TRANSFER so that the core generates the
1997 * next xfernotready and we will issue
1998 * a fresh START TRANSFER.
1999 * If there are still queued requests,
2000 * then wait; do not issue either END
2001 * or UPDATE TRANSFER, just attach the next
2002 * request from pending_list during
2003 * giveback. If any future queued request
2004 * is successfully transferred, then we
2005 * will issue UPDATE TRANSFER for all
2006 * requests in the pending_list.
2007 */
2008 dep->flags |= DWC3_EP_MISSED_ISOC;
2009 } else {
2010 dev_err(dwc->dev, "incomplete IN transfer %s\n",
2011 dep->name);
2012 status = -ECONNRESET;
2013 }
2014 } else {
2015 dep->flags &= ~DWC3_EP_MISSED_ISOC;
2016 }
2017 } else {
2018 if (count && (event->status & DEPEVT_STATUS_SHORT))
2019 s_pkt = 1;
2020 }
2021
2022 if (s_pkt && !chain)
2023 return 1;
2024 if ((event->status & DEPEVT_STATUS_LST) &&
2025 (trb->ctrl & (DWC3_TRB_CTRL_LST |
2026 DWC3_TRB_CTRL_HWO)))
2027 return 1;
2028 if ((event->status & DEPEVT_STATUS_IOC) &&
2029 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2030 return 1;
2031 return 0;
2032 }
2033
2034 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2035 const struct dwc3_event_depevt *event, int status)
2036 {
2037 struct dwc3_request *req;
2038 struct dwc3_trb *trb;
2039 unsigned int slot;
2040 unsigned int i;
2041 int count = 0;
2042 int ret;
2043
2044 do {
2045 int chain;
2046
2047 req = next_request(&dep->started_list);
2048 if (WARN_ON_ONCE(!req))
2049 return 1;
2050
2051 chain = req->request.num_mapped_sgs > 0;
2052 i = 0;
2053 do {
2054 slot = req->first_trb_index + i;
2055 if (slot == DWC3_TRB_NUM - 1)
2056 slot++;
2057 slot %= DWC3_TRB_NUM;
2058 trb = &dep->trb_pool[slot];
2059 count += trb->size & DWC3_TRB_SIZE_MASK;
2060
2061 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2062 event, status, chain);
2063 if (ret)
2064 break;
2065 } while (++i < req->request.num_mapped_sgs);
2066
2067 /*
2068 * We assume here we will always receive the entire data block
2069 * which we should receive. Meaning, if we program RX to
2070 * receive 4K but we receive only 2K, we assume that's all we
2071 * should receive and we simply bounce the request back to the
2072 * gadget driver for further processing.
2073 */
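		/*
		 * Example: for a 4K OUT request whose TRBs report 2K left in
		 * their size fields, 'count' is 2K and 'actual' ends up at 2K.
		 */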
2074 req->request.actual += req->request.length - count;
2075 dwc3_gadget_giveback(dep, req, status);
2076
2077 if (ret)
2078 break;
2079 } while (1);
2080
2081 /*
2082 	 * Our endpoint might get disabled by another thread during
2083 	 * dwc3_gadget_giveback(). If that happens, we're just going to return 1
2084 	 * early so that the DWC3_EP_BUSY flag gets cleared.
2085 */
2086 if (!dep->endpoint.desc)
2087 return 1;
2088
2089 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2090 list_empty(&dep->started_list)) {
2091 if (list_empty(&dep->pending_list)) {
2092 /*
2093 * If there is no entry in request list then do
2094 * not issue END TRANSFER now. Just set PENDING
2095 * flag, so that END TRANSFER is issued when an
2096 * entry is added into request list.
2097 */
2098 dep->flags = DWC3_EP_PENDING_REQUEST;
2099 } else {
2100 dwc3_stop_active_transfer(dwc, dep->number, true);
2101 dep->flags = DWC3_EP_ENABLED;
2102 }
2103 return 1;
2104 }
2105
2106 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2107 if ((event->status & DEPEVT_STATUS_IOC) &&
2108 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2109 return 0;
2110 return 1;
2111 }
2112
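/*
 * Common handler for XferComplete and XferInProgress endpoint events:
 * reclaims completed requests, clears DWC3_EP_BUSY when appropriate, applies
 * the second half of the U1/U2 -> U0 workaround on revisions < 1.83a and
 * kicks any pending transfers on non-isochronous endpoints.
 */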
2113 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2114 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2115 {
2116 unsigned status = 0;
2117 int clean_busy;
2118 u32 is_xfer_complete;
2119
2120 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2121
2122 if (event->status & DEPEVT_STATUS_BUSERR)
2123 status = -ECONNRESET;
2124
2125 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2126 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2127 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2128 dep->flags &= ~DWC3_EP_BUSY;
2129
2130 /*
2131 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2132 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2133 */
2134 if (dwc->revision < DWC3_REVISION_183A) {
2135 u32 reg;
2136 int i;
2137
2138 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2139 dep = dwc->eps[i];
2140
2141 if (!(dep->flags & DWC3_EP_ENABLED))
2142 continue;
2143
2144 if (!list_empty(&dep->started_list))
2145 return;
2146 }
2147
2148 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2149 reg |= dwc->u1u2;
2150 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2151
2152 dwc->u1u2 = 0;
2153 }
2154
2155 /*
2156 	 * Our endpoint might get disabled by another thread during
2157 	 * dwc3_gadget_giveback(). If that happens, we're just going to return
2158 	 * early and not kick any further transfers on it.
2159 */
2160 if (!dep->endpoint.desc)
2161 return;
2162
2163 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2164 int ret;
2165
2166 ret = __dwc3_gadget_kick_transfer(dep, 0);
2167 if (!ret || ret == -EBUSY)
2168 return;
2169 }
2170 }
2171
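/*
 * Endpoint event dispatcher: events for physical endpoints 0 and 1 are
 * forwarded to the ep0 state machine, everything else is handled here based
 * on the endpoint event type.
 */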
2172 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2173 const struct dwc3_event_depevt *event)
2174 {
2175 struct dwc3_ep *dep;
2176 u8 epnum = event->endpoint_number;
2177
2178 dep = dwc->eps[epnum];
2179
2180 if (!(dep->flags & DWC3_EP_ENABLED))
2181 return;
2182
2183 if (epnum == 0 || epnum == 1) {
2184 dwc3_ep0_interrupt(dwc, event);
2185 return;
2186 }
2187
2188 switch (event->endpoint_event) {
2189 case DWC3_DEPEVT_XFERCOMPLETE:
2190 dep->resource_index = 0;
2191
2192 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2193 dwc3_trace(trace_dwc3_gadget,
2194 "%s is an Isochronous endpoint",
2195 dep->name);
2196 return;
2197 }
2198
2199 dwc3_endpoint_transfer_complete(dwc, dep, event);
2200 break;
2201 case DWC3_DEPEVT_XFERINPROGRESS:
2202 dwc3_endpoint_transfer_complete(dwc, dep, event);
2203 break;
2204 case DWC3_DEPEVT_XFERNOTREADY:
2205 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2206 dwc3_gadget_start_isoc(dwc, dep, event);
2207 } else {
2208 int active;
2209 int ret;
2210
2211 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2212
2213 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2214 dep->name, active ? "Transfer Active"
2215 : "Transfer Not Active");
2216
2217 ret = __dwc3_gadget_kick_transfer(dep, 0);
2218 if (!ret || ret == -EBUSY)
2219 return;
2220
2221 dwc3_trace(trace_dwc3_gadget,
2222 "%s: failed to kick transfers",
2223 dep->name);
2224 }
2225
2226 break;
2227 case DWC3_DEPEVT_STREAMEVT:
2228 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2229 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2230 dep->name);
2231 return;
2232 }
2233
2234 switch (event->status) {
2235 case DEPEVT_STREAMEVT_FOUND:
2236 dwc3_trace(trace_dwc3_gadget,
2237 "Stream %d found and started",
2238 event->parameters);
2239
2240 break;
2241 case DEPEVT_STREAMEVT_NOTFOUND:
2242 /* FALLTHROUGH */
2243 default:
2244 dwc3_trace(trace_dwc3_gadget,
2245 "unable to find suitable stream");
2246 }
2247 break;
2248 case DWC3_DEPEVT_RXTXFIFOEVT:
2249 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
2250 break;
2251 case DWC3_DEPEVT_EPCMDCMPLT:
2252 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2253 break;
2254 }
2255 }
2256
2257 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2258 {
2259 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2260 spin_unlock(&dwc->lock);
2261 dwc->gadget_driver->disconnect(&dwc->gadget);
2262 spin_lock(&dwc->lock);
2263 }
2264 }
2265
2266 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2267 {
2268 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2269 spin_unlock(&dwc->lock);
2270 dwc->gadget_driver->suspend(&dwc->gadget);
2271 spin_lock(&dwc->lock);
2272 }
2273 }
2274
2275 static void dwc3_resume_gadget(struct dwc3 *dwc)
2276 {
2277 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2278 spin_unlock(&dwc->lock);
2279 dwc->gadget_driver->resume(&dwc->gadget);
2280 spin_lock(&dwc->lock);
2281 }
2282 }
2283
2284 static void dwc3_reset_gadget(struct dwc3 *dwc)
2285 {
2286 if (!dwc->gadget_driver)
2287 return;
2288
2289 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2290 spin_unlock(&dwc->lock);
2291 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2292 spin_lock(&dwc->lock);
2293 }
2294 }
2295
2296 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2297 {
2298 struct dwc3_ep *dep;
2299 struct dwc3_gadget_ep_cmd_params params;
2300 u32 cmd;
2301 int ret;
2302
2303 dep = dwc->eps[epnum];
2304
2305 if (!dep->resource_index)
2306 return;
2307
2308 /*
2309 * NOTICE: We are violating what the Databook says about the
2310 * EndTransfer command. Ideally we would _always_ wait for the
2311 * EndTransfer Command Completion IRQ, but that's causing too
2312 * much trouble synchronizing between us and gadget driver.
2313 *
2314 * We have discussed this with the IP Provider and it was
2315 * suggested to giveback all requests here, but give HW some
2316 * extra time to synchronize with the interconnect. We're using
2317 * an arbitrary 100us delay for that.
2318 *
2319 * Note also that a similar handling was tested by Synopsys
2320 * (thanks a lot Paul) and nothing bad has come out of it.
2321 * In short, what we're doing is:
2322 *
2323 * - Issue EndTransfer WITH CMDIOC bit set
2324 * - Wait 100us
2325 */
2326
2327 cmd = DWC3_DEPCMD_ENDTRANSFER;
2328 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2329 cmd |= DWC3_DEPCMD_CMDIOC;
2330 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2331 memset(&params, 0, sizeof(params));
2332 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2333 WARN_ON_ONCE(ret);
2334 dep->resource_index = 0;
2335 dep->flags &= ~DWC3_EP_BUSY;
2336 udelay(100);
2337 }
2338
2339 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2340 {
2341 u32 epnum;
2342
2343 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2344 struct dwc3_ep *dep;
2345
2346 dep = dwc->eps[epnum];
2347 if (!dep)
2348 continue;
2349
2350 if (!(dep->flags & DWC3_EP_ENABLED))
2351 continue;
2352
2353 dwc3_remove_requests(dwc, dep);
2354 }
2355 }
2356
2357 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2358 {
2359 u32 epnum;
2360
2361 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2362 struct dwc3_ep *dep;
2363 int ret;
2364
2365 dep = dwc->eps[epnum];
2366 if (!dep)
2367 continue;
2368
2369 if (!(dep->flags & DWC3_EP_STALL))
2370 continue;
2371
2372 dep->flags &= ~DWC3_EP_STALL;
2373
2374 ret = dwc3_send_clear_stall_ep_cmd(dep);
2375 WARN_ON_ONCE(ret);
2376 }
2377 }
2378
2379 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2380 {
2381 int reg;
2382
2383 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2384 reg &= ~DWC3_DCTL_INITU1ENA;
2385 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2386
2387 reg &= ~DWC3_DCTL_INITU2ENA;
2388 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2389
2390 dwc3_disconnect_gadget(dwc);
2391
2392 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2393 dwc->setup_packet_pending = false;
2394 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2395
2396 dwc->connected = false;
2397 }
2398
2399 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2400 {
2401 u32 reg;
2402
2403 dwc->connected = true;
2404
2405 /*
2406 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2407 * would cause a missing Disconnect Event if there's a
2408 * pending Setup Packet in the FIFO.
2409 *
2410 * There's no suggested workaround on the official Bug
2411 * report, which states that "unless the driver/application
2412 * is doing any special handling of a disconnect event,
2413 * there is no functional issue".
2414 *
2415 * Unfortunately, it turns out that we _do_ some special
2416 * handling of a disconnect event, namely complete all
2417 * pending transfers, notify gadget driver of the
2418 * disconnection, and so on.
2419 *
2420 * Our suggested workaround is to follow the Disconnect
2421 * Event steps here, instead, based on a setup_packet_pending
2422 * flag. Such flag gets set whenever we have a SETUP_PENDING
2423 * status for EP0 TRBs and gets cleared on XferComplete for the
2424 * same endpoint.
2425 *
2426 * Refers to:
2427 *
2428 * STAR#9000466709: RTL: Device : Disconnect event not
2429 * generated if setup packet pending in FIFO
2430 */
2431 if (dwc->revision < DWC3_REVISION_188A) {
2432 if (dwc->setup_packet_pending)
2433 dwc3_gadget_disconnect_interrupt(dwc);
2434 }
2435
2436 dwc3_reset_gadget(dwc);
2437
2438 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2439 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2440 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2441 dwc->test_mode = false;
2442
2443 dwc3_stop_active_transfers(dwc);
2444 dwc3_clear_stall_all_ep(dwc);
2445
2446 /* Reset device address to zero */
2447 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2448 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2449 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2450 }
2451
2452 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2453 {
2454 u32 reg;
2455 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2456
2457 /*
2458 	 * We change the clock only at SS, though it is not clear why we would
2459 	 * want to do this. Maybe it becomes part of the power saving plan.
2460 */
2461
2462 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2463 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2464 return;
2465
2466 /*
2467 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2468 * each time on Connect Done.
2469 */
2470 if (!usb30_clock)
2471 return;
2472
2473 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2474 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2475 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2476 }
2477
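/*
 * Connect Done: read the negotiated speed from DSTS, program ep0's maximum
 * packet size accordingly, enable USB2 LPM where the core supports it and
 * re-enable both directions of the default control endpoint.
 */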
2478 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2479 {
2480 struct dwc3_ep *dep;
2481 int ret;
2482 u32 reg;
2483 u8 speed;
2484
2485 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2486 speed = reg & DWC3_DSTS_CONNECTSPD;
2487 dwc->speed = speed;
2488
2489 dwc3_update_ram_clk_sel(dwc, speed);
2490
2491 switch (speed) {
2492 case DWC3_DSTS_SUPERSPEED_PLUS:
2493 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2494 dwc->gadget.ep0->maxpacket = 512;
2495 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2496 break;
2497 case DWC3_DSTS_SUPERSPEED:
2498 /*
2499 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2500 * would cause a missing USB3 Reset event.
2501 *
2502 * In such situations, we should force a USB3 Reset
2503 * event by calling our dwc3_gadget_reset_interrupt()
2504 * routine.
2505 *
2506 * Refers to:
2507 *
2508 * STAR#9000483510: RTL: SS : USB3 reset event may
2509 * not be generated always when the link enters poll
2510 */
2511 if (dwc->revision < DWC3_REVISION_190A)
2512 dwc3_gadget_reset_interrupt(dwc);
2513
2514 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2515 dwc->gadget.ep0->maxpacket = 512;
2516 dwc->gadget.speed = USB_SPEED_SUPER;
2517 break;
2518 case DWC3_DSTS_HIGHSPEED:
2519 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2520 dwc->gadget.ep0->maxpacket = 64;
2521 dwc->gadget.speed = USB_SPEED_HIGH;
2522 break;
2523 case DWC3_DSTS_FULLSPEED2:
2524 case DWC3_DSTS_FULLSPEED1:
2525 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2526 dwc->gadget.ep0->maxpacket = 64;
2527 dwc->gadget.speed = USB_SPEED_FULL;
2528 break;
2529 case DWC3_DSTS_LOWSPEED:
2530 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2531 dwc->gadget.ep0->maxpacket = 8;
2532 dwc->gadget.speed = USB_SPEED_LOW;
2533 break;
2534 }
2535
2536 /* Enable USB2 LPM Capability */
2537
2538 if ((dwc->revision > DWC3_REVISION_194A) &&
2539 (speed != DWC3_DSTS_SUPERSPEED) &&
2540 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2541 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2542 reg |= DWC3_DCFG_LPM_CAP;
2543 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2544
2545 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2546 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2547
2548 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2549
2550 /*
2551 		 * When dwc3 revisions >= 2.40a, the LPM Erratum is enabled, and
2552 		 * DCFG.LPMCap is set, the core responds with an ACK if the
2553 		 * BESL value in the LPM token is less than or equal to the LPM
2554 		 * NYET threshold.
2555 */
2556 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2557 && dwc->has_lpm_erratum,
2558 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2559
2560 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2561 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2562
2563 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2564 } else {
2565 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2566 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2567 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2568 }
2569
2570 dep = dwc->eps[0];
2571 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2572 false);
2573 if (ret) {
2574 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2575 return;
2576 }
2577
2578 dep = dwc->eps[1];
2579 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2580 false);
2581 if (ret) {
2582 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2583 return;
2584 }
2585
2586 /*
2587 * Configure PHY via GUSB3PIPECTLn if required.
2588 *
2589 * Update GTXFIFOSIZn
2590 *
2591 * In both cases reset values should be sufficient.
2592 */
2593 }
2594
2595 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2596 {
2597 /*
2598 * TODO take core out of low power mode when that's
2599 * implemented.
2600 */
2601
2602 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2603 spin_unlock(&dwc->lock);
2604 dwc->gadget_driver->resume(&dwc->gadget);
2605 spin_lock(&dwc->lock);
2606 }
2607 }
2608
2609 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2610 unsigned int evtinfo)
2611 {
2612 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2613 unsigned int pwropt;
2614
2615 /*
2616 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2617 * Hibernation mode enabled which would show up when device detects
2618 * host-initiated U3 exit.
2619 *
2620 * In that case, device will generate a Link State Change Interrupt
2621 * from U3 to RESUME which is only necessary if Hibernation is
2622 * configured in.
2623 *
2624 * There are no functional changes due to such spurious event and we
2625 * just need to ignore it.
2626 *
2627 * Refers to:
2628 *
2629 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2630 * operational mode
2631 */
2632 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2633 if ((dwc->revision < DWC3_REVISION_250A) &&
2634 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2635 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2636 (next == DWC3_LINK_STATE_RESUME)) {
2637 dwc3_trace(trace_dwc3_gadget,
2638 "ignoring transition U3 -> Resume");
2639 return;
2640 }
2641 }
2642
2643 /*
2644 	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2645 	 * on the link partner, the USB session might do multiple entries/exits
2646 * of low power states before a transfer takes place.
2647 *
2648 * Due to this problem, we might experience lower throughput. The
2649 * suggested workaround is to disable DCTL[12:9] bits if we're
2650 * transitioning from U1/U2 to U0 and enable those bits again
2651 * after a transfer completes and there are no pending transfers
2652 * on any of the enabled endpoints.
2653 *
2654 * This is the first half of that workaround.
2655 *
2656 * Refers to:
2657 *
2658 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2659 * core send LGO_Ux entering U0
2660 */
2661 if (dwc->revision < DWC3_REVISION_183A) {
2662 if (next == DWC3_LINK_STATE_U0) {
2663 u32 u1u2;
2664 u32 reg;
2665
2666 switch (dwc->link_state) {
2667 case DWC3_LINK_STATE_U1:
2668 case DWC3_LINK_STATE_U2:
2669 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2670 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2671 | DWC3_DCTL_ACCEPTU2ENA
2672 | DWC3_DCTL_INITU1ENA
2673 | DWC3_DCTL_ACCEPTU1ENA);
2674
2675 if (!dwc->u1u2)
2676 dwc->u1u2 = reg & u1u2;
2677
2678 reg &= ~u1u2;
2679
2680 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2681 break;
2682 default:
2683 /* do nothing */
2684 break;
2685 }
2686 }
2687 }
2688
2689 switch (next) {
2690 case DWC3_LINK_STATE_U1:
2691 if (dwc->speed == USB_SPEED_SUPER)
2692 dwc3_suspend_gadget(dwc);
2693 break;
2694 case DWC3_LINK_STATE_U2:
2695 case DWC3_LINK_STATE_U3:
2696 dwc3_suspend_gadget(dwc);
2697 break;
2698 case DWC3_LINK_STATE_RESUME:
2699 dwc3_resume_gadget(dwc);
2700 break;
2701 default:
2702 /* do nothing */
2703 break;
2704 }
2705
2706 dwc->link_state = next;
2707 }
2708
2709 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
2710 unsigned int evtinfo)
2711 {
2712 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2713
2714 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
2715 dwc3_suspend_gadget(dwc);
2716
2717 dwc->link_state = next;
2718 }
2719
2720 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2721 unsigned int evtinfo)
2722 {
2723 unsigned int is_ss = evtinfo & BIT(4);
2724
2725 	/*
2726 	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2727 	 * has a known issue which can cause USB CV TD.9.23 to fail
2728 * randomly.
2729 *
2730 * Because of this issue, core could generate bogus hibernation
2731 * events which SW needs to ignore.
2732 *
2733 * Refers to:
2734 *
2735 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2736 * Device Fallback from SuperSpeed
2737 */
2738 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2739 return;
2740
2741 /* enter hibernation here */
2742 }
2743
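/* Dispatcher for device-specific (DEVT) events. */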
2744 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2745 const struct dwc3_event_devt *event)
2746 {
2747 switch (event->type) {
2748 case DWC3_DEVICE_EVENT_DISCONNECT:
2749 dwc3_gadget_disconnect_interrupt(dwc);
2750 break;
2751 case DWC3_DEVICE_EVENT_RESET:
2752 dwc3_gadget_reset_interrupt(dwc);
2753 break;
2754 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2755 dwc3_gadget_conndone_interrupt(dwc);
2756 break;
2757 case DWC3_DEVICE_EVENT_WAKEUP:
2758 dwc3_gadget_wakeup_interrupt(dwc);
2759 break;
2760 case DWC3_DEVICE_EVENT_HIBER_REQ:
2761 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2762 "unexpected hibernation event\n"))
2763 break;
2764
2765 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2766 break;
2767 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2768 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2769 break;
2770 case DWC3_DEVICE_EVENT_EOPF:
2771 /* It changed to be suspend event for version 2.30a and above */
2772 if (dwc->revision < DWC3_REVISION_230A) {
2773 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2774 } else {
2775 dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
2776
2777 /*
2778 * Ignore suspend event until the gadget enters into
2779 * USB_STATE_CONFIGURED state.
2780 */
2781 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
2782 dwc3_gadget_suspend_interrupt(dwc,
2783 event->event_info);
2784 }
2785 break;
2786 case DWC3_DEVICE_EVENT_SOF:
2787 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2788 break;
2789 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2790 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2791 break;
2792 case DWC3_DEVICE_EVENT_CMD_CMPL:
2793 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2794 break;
2795 case DWC3_DEVICE_EVENT_OVERFLOW:
2796 dwc3_trace(trace_dwc3_gadget, "Overflow");
2797 break;
2798 default:
2799 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2800 }
2801 }
2802
2803 static void dwc3_process_event_entry(struct dwc3 *dwc,
2804 const union dwc3_event *event)
2805 {
2806 trace_dwc3_event(event->raw);
2807
2808 /* Endpoint IRQ, handle it and return early */
2809 if (event->type.is_devspec == 0) {
2810 /* depevt */
2811 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2812 }
2813
2814 switch (event->type.type) {
2815 case DWC3_EVENT_TYPE_DEV:
2816 dwc3_gadget_interrupt(dwc, &event->devt);
2817 break;
2818 	/* REVISIT what to do with Carkit and I2C events? */
2819 default:
2820 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2821 }
2822 }
2823
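/*
 * Runs from the threaded interrupt handler with dwc->lock held: decodes
 * every pending 4-byte event, writes the consumed amount back to
 * GEVNTCOUNT after each one and unmasks the event interrupt once the
 * buffer has been drained.
 */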
2824 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2825 {
2826 struct dwc3 *dwc = evt->dwc;
2827 irqreturn_t ret = IRQ_NONE;
2828 int left;
2829 u32 reg;
2830
2831 left = evt->count;
2832
2833 if (!(evt->flags & DWC3_EVENT_PENDING))
2834 return IRQ_NONE;
2835
2836 while (left > 0) {
2837 union dwc3_event event;
2838
2839 event.raw = *(u32 *) (evt->buf + evt->lpos);
2840
2841 dwc3_process_event_entry(dwc, &event);
2842
2843 /*
2844 		 * FIXME: we wrap around correctly to the next entry as
2845 		 * almost all entries are 4 bytes in size. There is one
2846 		 * entry which is 12 bytes: a regular entry followed by
2847 		 * 8 bytes of data. It is not yet clear how things are
2848 		 * organized if such an entry lands next to the buffer
2849 		 * boundary, so worry about that once we try to handle
2850 		 * that case.
2851 */
2852 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2853 left -= 4;
2854
2855 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2856 }
2857
2858 evt->count = 0;
2859 evt->flags &= ~DWC3_EVENT_PENDING;
2860 ret = IRQ_HANDLED;
2861
2862 /* Unmask interrupt */
2863 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2864 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2865 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2866
2867 return ret;
2868 }
2869
2870 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2871 {
2872 struct dwc3_event_buffer *evt = _evt;
2873 struct dwc3 *dwc = evt->dwc;
2874 unsigned long flags;
2875 irqreturn_t ret = IRQ_NONE;
2876
2877 spin_lock_irqsave(&dwc->lock, flags);
2878 ret = dwc3_process_event_buf(evt);
2879 spin_unlock_irqrestore(&dwc->lock, flags);
2880
2881 return ret;
2882 }
2883
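/*
 * Hard interrupt half: if the device is runtime suspended, remember that
 * events are pending and let runtime resume replay them; otherwise latch the
 * event count, mask the event interrupt and wake the handler thread.
 */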
2884 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2885 {
2886 struct dwc3 *dwc = evt->dwc;
2887 u32 count;
2888 u32 reg;
2889
2890 if (pm_runtime_suspended(dwc->dev)) {
2891 pm_runtime_get(dwc->dev);
2892 disable_irq_nosync(dwc->irq_gadget);
2893 dwc->pending_events = true;
2894 return IRQ_HANDLED;
2895 }
2896
2897 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2898 count &= DWC3_GEVNTCOUNT_MASK;
2899 if (!count)
2900 return IRQ_NONE;
2901
2902 evt->count = count;
2903 evt->flags |= DWC3_EVENT_PENDING;
2904
2905 /* Mask interrupt */
2906 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2907 reg |= DWC3_GEVNTSIZ_INTMASK;
2908 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2909
2910 return IRQ_WAKE_THREAD;
2911 }
2912
2913 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2914 {
2915 struct dwc3_event_buffer *evt = _evt;
2916
2917 return dwc3_check_event_buf(evt);
2918 }
2919
2920 /**
2921 * dwc3_gadget_init - Initializes gadget related registers
2922 * @dwc: pointer to our controller context structure
2923 *
2924 * Returns 0 on success otherwise negative errno.
2925 */
2926 int dwc3_gadget_init(struct dwc3 *dwc)
2927 {
2928 int ret, irq;
2929 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
2930
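	/*
	 * Look up the peripheral IRQ, falling back from the "peripheral"
	 * resource name to the legacy "dwc_usb3" name and finally to the
	 * first IRQ of the platform device.
	 */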
2931 irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
2932 if (irq == -EPROBE_DEFER)
2933 return irq;
2934
2935 if (irq <= 0) {
2936 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
2937 if (irq == -EPROBE_DEFER)
2938 return irq;
2939
2940 if (irq <= 0) {
2941 irq = platform_get_irq(dwc3_pdev, 0);
2942 if (irq <= 0) {
2943 if (irq != -EPROBE_DEFER) {
2944 dev_err(dwc->dev,
2945 "missing peripheral IRQ\n");
2946 }
2947 if (!irq)
2948 irq = -EINVAL;
2949 return irq;
2950 }
2951 }
2952 }
2953
2954 dwc->irq_gadget = irq;
2955
2956 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2957 &dwc->ctrl_req_addr, GFP_KERNEL);
2958 if (!dwc->ctrl_req) {
2959 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2960 ret = -ENOMEM;
2961 goto err0;
2962 }
2963
2964 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2965 &dwc->ep0_trb_addr, GFP_KERNEL);
2966 if (!dwc->ep0_trb) {
2967 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2968 ret = -ENOMEM;
2969 goto err1;
2970 }
2971
2972 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2973 if (!dwc->setup_buf) {
2974 ret = -ENOMEM;
2975 goto err2;
2976 }
2977
2978 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2979 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2980 GFP_KERNEL);
2981 if (!dwc->ep0_bounce) {
2982 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2983 ret = -ENOMEM;
2984 goto err3;
2985 }
2986
2987 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2988 if (!dwc->zlp_buf) {
2989 ret = -ENOMEM;
2990 goto err4;
2991 }
2992
2993 dwc->gadget.ops = &dwc3_gadget_ops;
2994 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2995 dwc->gadget.sg_supported = true;
2996 dwc->gadget.name = "dwc3-gadget";
2997 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
2998
2999 /*
3000 * FIXME We might be setting max_speed to <SUPER, however versions
3001 * <2.20a of dwc3 have an issue with metastability (documented
3002 * elsewhere in this driver) which tells us we can't set max speed to
3003 * anything lower than SUPER.
3004 *
3005 * Because gadget.max_speed is only used by composite.c and function
3006 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
3007 * to happen so we avoid sending SuperSpeed Capability descriptor
3008 	 * together with our BOS descriptor, as that could confuse the host into
3009 	 * thinking we can handle super speed.
3010 *
3011 * Note that, in fact, we won't even support GetBOS requests when speed
3012 * is less than super speed because we don't have means, yet, to tell
3013 * composite.c that we are USB 2.0 + LPM ECN.
3014 */
3015 if (dwc->revision < DWC3_REVISION_220A)
3016 dwc3_trace(trace_dwc3_gadget,
3017 "Changing max_speed on rev %08x",
3018 dwc->revision);
3019
3020 dwc->gadget.max_speed = dwc->maximum_speed;
3021
3022 /*
3023 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
3024 * on ep out.
3025 */
3026 dwc->gadget.quirk_ep_out_aligned_size = true;
3027
3028 /*
3029 * REVISIT: Here we should clear all pending IRQs to be
3030 * sure we're starting from a well known location.
3031 */
3032
3033 ret = dwc3_gadget_init_endpoints(dwc);
3034 if (ret)
3035 goto err5;
3036
3037 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
3038 if (ret) {
3039 dev_err(dwc->dev, "failed to register udc\n");
3040 goto err5;
3041 }
3042
3043 return 0;
3044
3045 err5:
3046 kfree(dwc->zlp_buf);
3047
3048 err4:
3049 dwc3_gadget_free_endpoints(dwc);
3050 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3051 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3052
3053 err3:
3054 kfree(dwc->setup_buf);
3055
3056 err2:
3057 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
3058 dwc->ep0_trb, dwc->ep0_trb_addr);
3059
3060 err1:
3061 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3062 dwc->ctrl_req, dwc->ctrl_req_addr);
3063
3064 err0:
3065 return ret;
3066 }
3067
3068 /* -------------------------------------------------------------------------- */
3069
3070 void dwc3_gadget_exit(struct dwc3 *dwc)
3071 {
3072 usb_del_gadget_udc(&dwc->gadget);
3073
3074 dwc3_gadget_free_endpoints(dwc);
3075
3076 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3077 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3078
3079 kfree(dwc->setup_buf);
3080 kfree(dwc->zlp_buf);
3081
3082 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
3083 dwc->ep0_trb, dwc->ep0_trb_addr);
3084
3085 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3086 dwc->ctrl_req, dwc->ctrl_req_addr);
3087 }
3088
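/*
 * System sleep helpers: on suspend, stop the controller and tell the gadget
 * driver about the disconnect; on resume, restart the controller and set the
 * Run/Stop bit again.
 */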
3089 int dwc3_gadget_suspend(struct dwc3 *dwc)
3090 {
3091 int ret;
3092
3093 if (!dwc->gadget_driver)
3094 return 0;
3095
3096 ret = dwc3_gadget_run_stop(dwc, false, false);
3097 if (ret < 0)
3098 return ret;
3099
3100 dwc3_disconnect_gadget(dwc);
3101 __dwc3_gadget_stop(dwc);
3102
3103 return 0;
3104 }
3105
3106 int dwc3_gadget_resume(struct dwc3 *dwc)
3107 {
3108 int ret;
3109
3110 if (!dwc->gadget_driver)
3111 return 0;
3112
3113 ret = __dwc3_gadget_start(dwc);
3114 if (ret < 0)
3115 goto err0;
3116
3117 ret = dwc3_gadget_run_stop(dwc, true, false);
3118 if (ret < 0)
3119 goto err1;
3120
3121 return 0;
3122
3123 err1:
3124 __dwc3_gadget_stop(dwc);
3125
3126 err0:
3127 return ret;
3128 }
3129
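/*
 * Replays an interrupt that fired while the device was runtime suspended
 * (see dwc3_check_event_buf()) and re-enables the gadget IRQ that was
 * disabled there.
 */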
3130 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3131 {
3132 if (dwc->pending_events) {
3133 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3134 dwc->pending_events = false;
3135 enable_irq(dwc->irq_gadget);
3136 }
3137 }