usb: dwc3: fix Clear Stall EP command failure
drivers/usb/dwc3/gadget.c
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if wrong Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
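		/* the test selector lives in the TSTCTRL field of DCTL, which starts at bit 1 */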
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the link state on success (>= 0) or -ETIMEDOUT.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
147
148 /**
149 * dwc3_ep_inc_trb() - Increment a TRB index.
150 * @index: Pointer to the TRB index to increment.
151 *
152 * The index should never point to the link TRB. After incrementing,
153 * if it points to the link TRB, wrap around to the beginning. The
154 * link TRB is always at the last TRB entry.
155 */
156 static void dwc3_ep_inc_trb(u8 *index)
157 {
158 (*index)++;
159 if (*index == (DWC3_TRB_NUM - 1))
160 *index = 0;
161 }
162
163 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
164 {
165 dwc3_ep_inc_trb(&dep->trb_enqueue);
166 }
167
168 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
169 {
170 dwc3_ep_inc_trb(&dep->trb_dequeue);
171 }
172
173 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
174 int status)
175 {
176 struct dwc3 *dwc = dep->dwc;
177 int i;
178
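	/* advance the dequeue pointer past every TRB this started request occupied */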
179 if (req->started) {
180 i = 0;
181 do {
182 dwc3_ep_inc_deq(dep);
183 		} while (++i < req->request.num_mapped_sgs);
184 req->started = false;
185 }
186 list_del(&req->list);
187 req->trb = NULL;
188
189 if (req->request.status == -EINPROGRESS)
190 req->request.status = status;
191
192 if (dwc->ep0_bounced && dep->number == 0)
193 dwc->ep0_bounced = false;
194 else
195 usb_gadget_unmap_request(&dwc->gadget, &req->request,
196 req->direction);
197
198 trace_dwc3_gadget_giveback(req);
199
200 spin_unlock(&dwc->lock);
201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
202 spin_lock(&dwc->lock);
203
204 if (dep->number > 1)
205 pm_runtime_put(dwc->dev);
206 }
207
208 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
209 {
210 u32 timeout = 500;
211 int status = 0;
212 int ret = 0;
213 u32 reg;
214
215 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
216 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
217
218 do {
219 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
220 if (!(reg & DWC3_DGCMD_CMDACT)) {
221 status = DWC3_DGCMD_STATUS(reg);
222 if (status)
223 ret = -EINVAL;
224 break;
225 }
226 	} while (--timeout);
227
228 if (!timeout) {
229 ret = -ETIMEDOUT;
230 status = -ETIMEDOUT;
231 }
232
233 trace_dwc3_gadget_generic_cmd(cmd, param, status);
234
235 return ret;
236 }
237
238 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
239
240 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
241 struct dwc3_gadget_ep_cmd_params *params)
242 {
243 struct dwc3 *dwc = dep->dwc;
244 u32 timeout = 500;
245 u32 reg;
246
247 int cmd_status = 0;
248 int susphy = false;
249 int ret = -EINVAL;
250
251 /*
252 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
253 * we're issuing an endpoint command, we must check if
254 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
255 *
256 * We will also set SUSPHY bit to what it was before returning as stated
257 * by the same section on Synopsys databook.
258 */
259 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
260 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
261 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
262 susphy = true;
263 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
264 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
265 }
266 }
267
268 if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
269 int needs_wakeup;
270
271 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
272 dwc->link_state == DWC3_LINK_STATE_U2 ||
273 dwc->link_state == DWC3_LINK_STATE_U3);
274
275 if (unlikely(needs_wakeup)) {
276 ret = __dwc3_gadget_wakeup(dwc);
277 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
278 ret);
279 }
280 }
281
282 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
283 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
284 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
285
286 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
287 do {
288 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
289 if (!(reg & DWC3_DEPCMD_CMDACT)) {
290 cmd_status = DWC3_DEPCMD_STATUS(reg);
291
292 switch (cmd_status) {
293 case 0:
294 ret = 0;
295 break;
296 case DEPEVT_TRANSFER_NO_RESOURCE:
297 ret = -EINVAL;
298 break;
299 case DEPEVT_TRANSFER_BUS_EXPIRY:
300 /*
301 * SW issues START TRANSFER command to
302 * isochronous ep with future frame interval. If
303 * future interval time has already passed when
304 * core receives the command, it will respond
305 * with an error status of 'Bus Expiry'.
306 *
307 * Instead of always returning -EINVAL, let's
308 * give a hint to the gadget driver that this is
309 * the case by returning -EAGAIN.
310 */
311 ret = -EAGAIN;
312 break;
313 default:
314 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
315 }
316
317 break;
318 }
319 } while (--timeout);
320
321 if (timeout == 0) {
322 ret = -ETIMEDOUT;
323 cmd_status = -ETIMEDOUT;
324 }
325
326 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
327
328 if (unlikely(susphy)) {
329 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
330 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
331 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
332 }
333
334 return ret;
335 }
336
337 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
338 {
339 struct dwc3 *dwc = dep->dwc;
340 struct dwc3_gadget_ep_cmd_params params;
341 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
342
343 /*
344 * As of core revision 2.60a the recommended programming model
345 * is to set the ClearPendIN bit when issuing a Clear Stall EP
346 * command for IN endpoints. This is to prevent an issue where
347 * some (non-compliant) hosts may not send ACK TPs for pending
348 * IN transfers due to a mishandled error condition. Synopsys
349 * STAR 9000614252.
350 */
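	/*
	 * The ClearPendIN bit is only applicable to IN endpoints on 2.60a and
	 * later cores operating at SuperSpeed or faster, hence the checks below.
	 */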
351 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
352 (dwc->gadget.speed >= USB_SPEED_SUPER))
353 cmd |= DWC3_DEPCMD_CLEARPENDIN;
354
355 memset(&params, 0, sizeof(params));
356
357 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
358 }
359
360 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
361 struct dwc3_trb *trb)
362 {
363 u32 offset = (char *) trb - (char *) dep->trb_pool;
364
365 return dep->trb_pool_dma + offset;
366 }
367
368 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
369 {
370 struct dwc3 *dwc = dep->dwc;
371
372 if (dep->trb_pool)
373 return 0;
374
375 dep->trb_pool = dma_alloc_coherent(dwc->dev,
376 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
377 &dep->trb_pool_dma, GFP_KERNEL);
378 if (!dep->trb_pool) {
379 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
380 dep->name);
381 return -ENOMEM;
382 }
383
384 return 0;
385 }
386
387 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
388 {
389 struct dwc3 *dwc = dep->dwc;
390
391 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
392 dep->trb_pool, dep->trb_pool_dma);
393
394 dep->trb_pool = NULL;
395 dep->trb_pool_dma = 0;
396 }
397
398 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
399
400 /**
401 * dwc3_gadget_start_config - Configure EP resources
402 * @dwc: pointer to our controller context structure
403 * @dep: endpoint that is being enabled
404 *
405 * The assignment of transfer resources cannot perfectly follow the
406 * data book due to the fact that the controller driver does not have
407 * all knowledge of the configuration in advance. It is given this
408 * information piecemeal by the composite gadget framework after every
409 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
410 * programming model in this scenario can cause errors. For two
411 * reasons:
412 *
413 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
414 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
415 * multiple interfaces.
416 *
417 * 2) The databook does not mention doing more DEPXFERCFG for new
418 * endpoint on alt setting (8.1.6).
419 *
420 * The following simplified method is used instead:
421 *
422 * All hardware endpoints can be assigned a transfer resource and this
423 * setting will stay persistent until either a core reset or
424 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
425 * do DEPXFERCFG for every hardware endpoint as well. We are
426 * guaranteed that there are as many transfer resources as endpoints.
427 *
428 * This function is called for each endpoint when it is being enabled
429 * but is triggered only when called for EP0-out, which always happens
430 * first, and which should only happen in one of the above conditions.
431 */
432 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
433 {
434 struct dwc3_gadget_ep_cmd_params params;
435 u32 cmd;
436 int i;
437 int ret;
438
439 if (dep->number)
440 return 0;
441
442 memset(&params, 0x00, sizeof(params));
443 cmd = DWC3_DEPCMD_DEPSTARTCFG;
444
445 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
446 if (ret)
447 return ret;
448
449 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
450 struct dwc3_ep *dep = dwc->eps[i];
451
452 if (!dep)
453 continue;
454
455 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
456 if (ret)
457 return ret;
458 }
459
460 return 0;
461 }
462
463 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
464 const struct usb_endpoint_descriptor *desc,
465 const struct usb_ss_ep_comp_descriptor *comp_desc,
466 bool modify, bool restore)
467 {
468 struct dwc3_gadget_ep_cmd_params params;
469
470 if (dev_WARN_ONCE(dwc->dev, modify && restore,
471 "Can't modify and restore\n"))
472 return -EINVAL;
473
474 memset(&params, 0x00, sizeof(params));
475
476 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
477 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
478
479 /* Burst size is only needed in SuperSpeed mode */
480 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
481 u32 burst = dep->endpoint.maxburst;
482 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
483 }
484
485 if (modify) {
486 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
487 } else if (restore) {
488 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
489 params.param2 |= dep->saved_state;
490 } else {
491 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
492 }
493
494 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
495
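	/* XferNotReady events are only needed for control and isochronous endpoints */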
496 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
497 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
498
499 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
500 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
501 | DWC3_DEPCFG_STREAM_EVENT_EN;
502 dep->stream_capable = true;
503 }
504
505 if (!usb_endpoint_xfer_control(desc))
506 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
507
508 /*
509 * We are doing 1:1 mapping for endpoints, meaning
510 	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
511 	 * so on. We consider the direction bit as part of the physical
512 	 * endpoint number. So USB endpoint 0x81 is physical endpoint 3.
513 */
514 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
515
516 /*
517 * We must use the lower 16 TX FIFOs even though
518 * HW might have more
519 */
520 if (dep->direction)
521 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
522
523 if (desc->bInterval) {
524 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
525 dep->interval = 1 << (desc->bInterval - 1);
526 }
527
528 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
529 }
530
531 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
532 {
533 struct dwc3_gadget_ep_cmd_params params;
534
535 memset(&params, 0x00, sizeof(params));
536
537 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
538
539 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
540 &params);
541 }
542
543 /**
544 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
545 * @dep: endpoint to be initialized
546 * @desc: USB Endpoint Descriptor
547 *
548 * Caller should take care of locking
549 */
550 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
551 const struct usb_endpoint_descriptor *desc,
552 const struct usb_ss_ep_comp_descriptor *comp_desc,
553 bool modify, bool restore)
554 {
555 struct dwc3 *dwc = dep->dwc;
556 u32 reg;
557 int ret;
558
559 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
560
561 if (!(dep->flags & DWC3_EP_ENABLED)) {
562 ret = dwc3_gadget_start_config(dwc, dep);
563 if (ret)
564 return ret;
565 }
566
567 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
568 restore);
569 if (ret)
570 return ret;
571
572 if (!(dep->flags & DWC3_EP_ENABLED)) {
573 struct dwc3_trb *trb_st_hw;
574 struct dwc3_trb *trb_link;
575
576 dep->endpoint.desc = desc;
577 dep->comp_desc = comp_desc;
578 dep->type = usb_endpoint_type(desc);
579 dep->flags |= DWC3_EP_ENABLED;
580
581 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
582 reg |= DWC3_DALEPENA_EP(dep->number);
583 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
584
585 if (usb_endpoint_xfer_control(desc))
586 return 0;
587
588 /* Initialize the TRB ring */
589 dep->trb_dequeue = 0;
590 dep->trb_enqueue = 0;
591 memset(dep->trb_pool, 0,
592 sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
593
594 /* Link TRB. The HWO bit is never reset */
595 trb_st_hw = &dep->trb_pool[0];
596
597 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
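		/* the link TRB points back to the first TRB, turning the pool into a circular ring */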
598 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
599 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
600 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
601 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
602 }
603
604 return 0;
605 }
606
607 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
608 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
609 {
610 struct dwc3_request *req;
611
612 dwc3_stop_active_transfer(dwc, dep->number, true);
613
614 	/* giveback all requests to the gadget driver */
615 while (!list_empty(&dep->started_list)) {
616 req = next_request(&dep->started_list);
617
618 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
619 }
620
621 while (!list_empty(&dep->pending_list)) {
622 req = next_request(&dep->pending_list);
623
624 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
625 }
626 }
627
628 /**
629 * __dwc3_gadget_ep_disable - Disables a HW endpoint
630 * @dep: the endpoint to disable
631 *
632 * This function also removes requests which are currently being processed by the
633 * hardware and those which are not yet scheduled.
634 * Caller should take care of locking.
635 */
636 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
637 {
638 struct dwc3 *dwc = dep->dwc;
639 u32 reg;
640
641 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
642
643 dwc3_remove_requests(dwc, dep);
644
645 /* make sure HW endpoint isn't stalled */
646 if (dep->flags & DWC3_EP_STALL)
647 __dwc3_gadget_ep_set_halt(dep, 0, false);
648
649 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
650 reg &= ~DWC3_DALEPENA_EP(dep->number);
651 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
652
653 dep->stream_capable = false;
654 dep->endpoint.desc = NULL;
655 dep->comp_desc = NULL;
656 dep->type = 0;
657 dep->flags = 0;
658
659 return 0;
660 }
661
662 /* -------------------------------------------------------------------------- */
663
664 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
665 const struct usb_endpoint_descriptor *desc)
666 {
667 return -EINVAL;
668 }
669
670 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
671 {
672 return -EINVAL;
673 }
674
675 /* -------------------------------------------------------------------------- */
676
677 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
678 const struct usb_endpoint_descriptor *desc)
679 {
680 struct dwc3_ep *dep;
681 struct dwc3 *dwc;
682 unsigned long flags;
683 int ret;
684
685 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
686 pr_debug("dwc3: invalid parameters\n");
687 return -EINVAL;
688 }
689
690 if (!desc->wMaxPacketSize) {
691 pr_debug("dwc3: missing wMaxPacketSize\n");
692 return -EINVAL;
693 }
694
695 dep = to_dwc3_ep(ep);
696 dwc = dep->dwc;
697
698 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
699 "%s is already enabled\n",
700 dep->name))
701 return 0;
702
703 spin_lock_irqsave(&dwc->lock, flags);
704 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
705 spin_unlock_irqrestore(&dwc->lock, flags);
706
707 return ret;
708 }
709
710 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
711 {
712 struct dwc3_ep *dep;
713 struct dwc3 *dwc;
714 unsigned long flags;
715 int ret;
716
717 if (!ep) {
718 pr_debug("dwc3: invalid parameters\n");
719 return -EINVAL;
720 }
721
722 dep = to_dwc3_ep(ep);
723 dwc = dep->dwc;
724
725 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
726 "%s is already disabled\n",
727 dep->name))
728 return 0;
729
730 spin_lock_irqsave(&dwc->lock, flags);
731 ret = __dwc3_gadget_ep_disable(dep);
732 spin_unlock_irqrestore(&dwc->lock, flags);
733
734 return ret;
735 }
736
737 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
738 gfp_t gfp_flags)
739 {
740 struct dwc3_request *req;
741 struct dwc3_ep *dep = to_dwc3_ep(ep);
742
743 req = kzalloc(sizeof(*req), gfp_flags);
744 if (!req)
745 return NULL;
746
747 req->epnum = dep->number;
748 req->dep = dep;
749
750 dep->allocated_requests++;
751
752 trace_dwc3_alloc_request(req);
753
754 return &req->request;
755 }
756
757 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
758 struct usb_request *request)
759 {
760 struct dwc3_request *req = to_dwc3_request(request);
761 struct dwc3_ep *dep = to_dwc3_ep(ep);
762
763 dep->allocated_requests--;
764 trace_dwc3_free_request(req);
765 kfree(req);
766 }
767
768 /**
769 * dwc3_prepare_one_trb - setup one TRB from one request
770 * @dep: endpoint for which this request is prepared
771 * @req: dwc3_request pointer
772 */
773 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
774 struct dwc3_request *req, dma_addr_t dma,
775 unsigned length, unsigned last, unsigned chain, unsigned node)
776 {
777 struct dwc3_trb *trb;
778
779 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
780 dep->name, req, (unsigned long long) dma,
781 length, last ? " last" : "",
782 chain ? " chain" : "");
783
784
785 trb = &dep->trb_pool[dep->trb_enqueue];
786
787 if (!req->trb) {
788 dwc3_gadget_move_started_request(req);
789 req->trb = trb;
790 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
791 req->first_trb_index = dep->trb_enqueue;
792 }
793
794 dwc3_ep_inc_enq(dep);
795
796 trb->size = DWC3_TRB_SIZE_LENGTH(length);
797 trb->bpl = lower_32_bits(dma);
798 trb->bph = upper_32_bits(dma);
799
800 switch (usb_endpoint_type(dep->endpoint.desc)) {
801 case USB_ENDPOINT_XFER_CONTROL:
802 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
803 break;
804
805 case USB_ENDPOINT_XFER_ISOC:
806 if (!node)
807 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
808 else
809 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
810
811 /* always enable Interrupt on Missed ISOC */
812 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
813 break;
814
815 case USB_ENDPOINT_XFER_BULK:
816 case USB_ENDPOINT_XFER_INT:
817 trb->ctrl = DWC3_TRBCTL_NORMAL;
818 break;
819 default:
820 /*
821 * This is only possible with faulty memory because we
822 * checked it already :)
823 */
824 BUG();
825 }
826
827 /* always enable Continue on Short Packet */
828 trb->ctrl |= DWC3_TRB_CTRL_CSP;
829
830 if (!req->request.no_interrupt && !chain)
831 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
832
833 if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
834 trb->ctrl |= DWC3_TRB_CTRL_LST;
835
836 if (chain)
837 trb->ctrl |= DWC3_TRB_CTRL_CHN;
838
839 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
840 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
841
842 trb->ctrl |= DWC3_TRB_CTRL_HWO;
843
844 dep->queued_requests++;
845
846 trace_dwc3_prepare_trb(dep, trb);
847 }
848
849 /**
850 * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
851 * @dep: The endpoint with the TRB ring
852 * @index: The index of the current TRB in the ring
853 *
854 * Returns the TRB prior to the one pointed to by the index. If the
855 * index is 0, we will wrap backwards, skip the link TRB, and return
856 * the one just before that.
857 */
858 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
859 {
860 if (!index)
861 index = DWC3_TRB_NUM - 2;
862 else
863 		index--;
864
865 return &dep->trb_pool[index];
866 }
867
868 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
869 {
870 struct dwc3_trb *tmp;
871 u8 trbs_left;
872
873 /*
874 	 * If enqueue & dequeue are equal, then the ring is either full or empty.
875 	 *
876 	 * One way to know for sure is to check whether the TRB right before us has
877 	 * its HWO bit set. If it does, then we're definitely full and can't fit any
878 * more transfers in our ring.
879 */
880 if (dep->trb_enqueue == dep->trb_dequeue) {
881 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
882 if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
883 return 0;
884
885 return DWC3_TRB_NUM - 1;
886 }
887
888 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
889 trbs_left &= (DWC3_TRB_NUM - 1);
890
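	/*
	 * When the free region wraps past the end of the ring it includes the
	 * link TRB slot, which cannot hold a transfer, so discount it.
	 */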
891 if (dep->trb_dequeue < dep->trb_enqueue)
892 trbs_left--;
893
894 return trbs_left;
895 }
896
897 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
898 struct dwc3_request *req, unsigned int trbs_left,
899 unsigned int more_coming)
900 {
901 struct usb_request *request = &req->request;
902 struct scatterlist *sg = request->sg;
903 struct scatterlist *s;
904 unsigned int last = false;
905 unsigned int length;
906 dma_addr_t dma;
907 int i;
908
909 for_each_sg(sg, s, request->num_mapped_sgs, i) {
910 unsigned chain = true;
911
912 length = sg_dma_len(s);
913 dma = sg_dma_address(s);
914
915 if (sg_is_last(s)) {
916 if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
917 !more_coming)
918 last = true;
919
920 chain = false;
921 }
922
923 		if (!--trbs_left)
924 last = true;
925
926 if (last)
927 chain = false;
928
929 dwc3_prepare_one_trb(dep, req, dma, length,
930 last, chain, i);
931
932 if (last)
933 break;
934 }
935 }
936
937 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
938 struct dwc3_request *req, unsigned int trbs_left,
939 unsigned int more_coming)
940 {
941 unsigned int last = false;
942 unsigned int length;
943 dma_addr_t dma;
944
945 dma = req->request.dma;
946 length = req->request.length;
947
948 if (!trbs_left)
949 last = true;
950
951 /* Is this the last request? */
952 if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
953 last = true;
954
955 dwc3_prepare_one_trb(dep, req, dma, length,
956 last, false, 0);
957 }
958
959 /*
960 * dwc3_prepare_trbs - setup TRBs from requests
961 * @dep: endpoint for which requests are being prepared
962 *
963 * The function goes through the requests list and sets up TRBs for the
964 * transfers. The function returns once there are no more TRBs available or
965 * it runs out of requests.
966 */
967 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
968 {
969 struct dwc3_request *req, *n;
970 unsigned int more_coming;
971 u32 trbs_left;
972
973 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
974
975 trbs_left = dwc3_calc_trbs_left(dep);
976 if (!trbs_left)
977 return;
978
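	/* estimate how many allocated requests are not yet on the ring; if more are expected, don't mark this transfer as the last one */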
979 more_coming = dep->allocated_requests - dep->queued_requests;
980
981 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
982 if (req->request.num_mapped_sgs > 0)
983 dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
984 more_coming);
985 else
986 dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
987 more_coming);
988
989 if (!trbs_left)
990 return;
991 }
992 }
993
994 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
995 {
996 struct dwc3_gadget_ep_cmd_params params;
997 struct dwc3_request *req;
998 struct dwc3 *dwc = dep->dwc;
999 int starting;
1000 int ret;
1001 u32 cmd;
1002
1003 starting = !(dep->flags & DWC3_EP_BUSY);
1004
1005 dwc3_prepare_trbs(dep);
1006 req = next_request(&dep->started_list);
1007 if (!req) {
1008 dep->flags |= DWC3_EP_PENDING_REQUEST;
1009 return 0;
1010 }
1011
1012 memset(&params, 0, sizeof(params));
1013
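	/*
	 * No transfer in flight yet: issue START TRANSFER with the address of
	 * the request's first TRB. Otherwise issue UPDATE TRANSFER against the
	 * active transfer's resource index so the controller picks up newly
	 * prepared TRBs.
	 */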
1014 if (starting) {
1015 params.param0 = upper_32_bits(req->trb_dma);
1016 params.param1 = lower_32_bits(req->trb_dma);
1017 cmd = DWC3_DEPCMD_STARTTRANSFER |
1018 DWC3_DEPCMD_PARAM(cmd_param);
1019 } else {
1020 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1021 DWC3_DEPCMD_PARAM(dep->resource_index);
1022 }
1023
1024 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1025 if (ret < 0) {
1026 /*
1027 * FIXME we need to iterate over the list of requests
1028 * here and stop, unmap, free and del each of the linked
1029 * requests instead of what we do now.
1030 */
1031 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1032 req->direction);
1033 list_del(&req->list);
1034 return ret;
1035 }
1036
1037 dep->flags |= DWC3_EP_BUSY;
1038
1039 if (starting) {
1040 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1041 WARN_ON_ONCE(!dep->resource_index);
1042 }
1043
1044 return 0;
1045 }
1046
1047 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1048 struct dwc3_ep *dep, u32 cur_uf)
1049 {
1050 u32 uf;
1051
1052 if (list_empty(&dep->pending_list)) {
1053 dwc3_trace(trace_dwc3_gadget,
1054 "ISOC ep %s run out for requests",
1055 dep->name);
1056 dep->flags |= DWC3_EP_PENDING_REQUEST;
1057 return;
1058 }
1059
1060 /* 4 micro frames in the future */
1061 uf = cur_uf + dep->interval * 4;
1062
1063 __dwc3_gadget_kick_transfer(dep, uf);
1064 }
1065
1066 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1067 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1068 {
1069 u32 cur_uf, mask;
1070
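	/* round the current microframe number down to a multiple of the endpoint's interval */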
1071 mask = ~(dep->interval - 1);
1072 cur_uf = event->parameters & mask;
1073
1074 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1075 }
1076
1077 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1078 {
1079 struct dwc3 *dwc = dep->dwc;
1080 int ret;
1081
1082 if (!dep->endpoint.desc) {
1083 dwc3_trace(trace_dwc3_gadget,
1084 "trying to queue request %p to disabled %s",
1085 &req->request, dep->endpoint.name);
1086 return -ESHUTDOWN;
1087 }
1088
1089 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1090 &req->request, req->dep->name)) {
1091 dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
1092 &req->request, req->dep->name);
1093 return -EINVAL;
1094 }
1095
1096 pm_runtime_get(dwc->dev);
1097
1098 req->request.actual = 0;
1099 req->request.status = -EINPROGRESS;
1100 req->direction = dep->direction;
1101 req->epnum = dep->number;
1102
1103 trace_dwc3_ep_queue(req);
1104
1105 /*
1106 * We only add to our list of requests now and
1107 * start consuming the list once we get XferNotReady
1108 * IRQ.
1109 *
1110 * That way, we avoid doing anything that we don't need
1111 * to do now and defer it until the point we receive a
1112 * particular token from the Host side.
1113 *
1114 * This will also avoid Host cancelling URBs due to too
1115 * many NAKs.
1116 */
1117 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1118 dep->direction);
1119 if (ret)
1120 return ret;
1121
1122 list_add_tail(&req->list, &dep->pending_list);
1123
1124 /*
1125 * If there are no pending requests and the endpoint isn't already
1126 * busy, we will just start the request straight away.
1127 *
1128 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1129 * little bit faster.
1130 */
1131 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1132 !usb_endpoint_xfer_int(dep->endpoint.desc)) {
1133 ret = __dwc3_gadget_kick_transfer(dep, 0);
1134 goto out;
1135 }
1136
1137 /*
1138 * There are a few special cases:
1139 *
1140 * 1. XferNotReady with empty list of requests. We need to kick the
1141 * transfer here in that situation, otherwise we will be NAKing
1142 * forever. If we get XferNotReady before gadget driver has a
1143 * chance to queue a request, we will ACK the IRQ but won't be
1144 * able to receive the data until the next request is queued.
1145 * The following code is handling exactly that.
1146 *
1147 */
1148 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1149 /*
1150 * If xfernotready is already elapsed and it is a case
1151 * of isoc transfer, then issue END TRANSFER, so that
1152 * you can receive xfernotready again and can have
1153 * notion of current microframe.
1154 */
1155 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1156 if (list_empty(&dep->started_list)) {
1157 dwc3_stop_active_transfer(dwc, dep->number, true);
1158 dep->flags = DWC3_EP_ENABLED;
1159 }
1160 return 0;
1161 }
1162
1163 ret = __dwc3_gadget_kick_transfer(dep, 0);
1164 if (!ret)
1165 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1166
1167 goto out;
1168 }
1169
1170 /*
1171 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1172 * kick the transfer here after queuing a request, otherwise the
1173 * core may not see the modified TRB(s).
1174 */
1175 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1176 (dep->flags & DWC3_EP_BUSY) &&
1177 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1178 WARN_ON_ONCE(!dep->resource_index);
1179 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
1180 goto out;
1181 }
1182
1183 /*
1184 	 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
1185 * right away, otherwise host will not know we have streams to be
1186 * handled.
1187 */
1188 if (dep->stream_capable)
1189 ret = __dwc3_gadget_kick_transfer(dep, 0);
1190
1191 out:
1192 if (ret && ret != -EBUSY)
1193 dwc3_trace(trace_dwc3_gadget,
1194 "%s: failed to kick transfers",
1195 dep->name);
1196 if (ret == -EBUSY)
1197 ret = 0;
1198
1199 return ret;
1200 }
1201
1202 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1203 struct usb_request *request)
1204 {
1205 dwc3_gadget_ep_free_request(ep, request);
1206 }
1207
1208 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1209 {
1210 struct dwc3_request *req;
1211 struct usb_request *request;
1212 struct usb_ep *ep = &dep->endpoint;
1213
1214 dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
1215 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1216 if (!request)
1217 return -ENOMEM;
1218
1219 request->length = 0;
1220 request->buf = dwc->zlp_buf;
1221 request->complete = __dwc3_gadget_ep_zlp_complete;
1222
1223 req = to_dwc3_request(request);
1224
1225 return __dwc3_gadget_ep_queue(dep, req);
1226 }
1227
1228 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1229 gfp_t gfp_flags)
1230 {
1231 struct dwc3_request *req = to_dwc3_request(request);
1232 struct dwc3_ep *dep = to_dwc3_ep(ep);
1233 struct dwc3 *dwc = dep->dwc;
1234
1235 unsigned long flags;
1236
1237 int ret;
1238
1239 spin_lock_irqsave(&dwc->lock, flags);
1240 ret = __dwc3_gadget_ep_queue(dep, req);
1241
1242 /*
1243 * Okay, here's the thing, if gadget driver has requested for a ZLP by
1244 * setting request->zero, instead of doing magic, we will just queue an
1245 * extra usb_request ourselves so that it gets handled the same way as
1246 * any other request.
1247 */
1248 if (ret == 0 && request->zero && request->length &&
1249 (request->length % ep->maxpacket == 0))
1250 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1251
1252 spin_unlock_irqrestore(&dwc->lock, flags);
1253
1254 return ret;
1255 }
1256
1257 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1258 struct usb_request *request)
1259 {
1260 struct dwc3_request *req = to_dwc3_request(request);
1261 struct dwc3_request *r = NULL;
1262
1263 struct dwc3_ep *dep = to_dwc3_ep(ep);
1264 struct dwc3 *dwc = dep->dwc;
1265
1266 unsigned long flags;
1267 int ret = 0;
1268
1269 trace_dwc3_ep_dequeue(req);
1270
1271 spin_lock_irqsave(&dwc->lock, flags);
1272
1273 list_for_each_entry(r, &dep->pending_list, list) {
1274 if (r == req)
1275 break;
1276 }
1277
1278 if (r != req) {
1279 list_for_each_entry(r, &dep->started_list, list) {
1280 if (r == req)
1281 break;
1282 }
1283 if (r == req) {
1284 /* wait until it is processed */
1285 dwc3_stop_active_transfer(dwc, dep->number, true);
1286 goto out1;
1287 }
1288 dev_err(dwc->dev, "request %p was not queued to %s\n",
1289 request, ep->name);
1290 ret = -EINVAL;
1291 goto out0;
1292 }
1293
1294 out1:
1295 /* giveback the request */
1296 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1297
1298 out0:
1299 spin_unlock_irqrestore(&dwc->lock, flags);
1300
1301 return ret;
1302 }
1303
1304 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1305 {
1306 struct dwc3_gadget_ep_cmd_params params;
1307 struct dwc3 *dwc = dep->dwc;
1308 int ret;
1309
1310 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1311 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1312 return -EINVAL;
1313 }
1314
1315 memset(&params, 0x00, sizeof(params));
1316
1317 if (value) {
1318 struct dwc3_trb *trb;
1319
1320 unsigned transfer_in_flight;
1321 unsigned started;
1322
1323 if (dep->number > 1)
1324 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1325 else
1326 trb = &dwc->ep0_trb[dep->trb_enqueue];
1327
1328 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1329 started = !list_empty(&dep->started_list);
1330
1331 if (!protocol && ((dep->direction && transfer_in_flight) ||
1332 (!dep->direction && started))) {
1333 dwc3_trace(trace_dwc3_gadget,
1334 "%s: pending request, cannot halt",
1335 dep->name);
1336 return -EAGAIN;
1337 }
1338
1339 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1340 &params);
1341 if (ret)
1342 dev_err(dwc->dev, "failed to set STALL on %s\n",
1343 dep->name);
1344 else
1345 dep->flags |= DWC3_EP_STALL;
1346 } else {
1347
1348 ret = dwc3_send_clear_stall_ep_cmd(dep);
1349 if (ret)
1350 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1351 dep->name);
1352 else
1353 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1354 }
1355
1356 return ret;
1357 }
1358
1359 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1360 {
1361 struct dwc3_ep *dep = to_dwc3_ep(ep);
1362 struct dwc3 *dwc = dep->dwc;
1363
1364 unsigned long flags;
1365
1366 int ret;
1367
1368 spin_lock_irqsave(&dwc->lock, flags);
1369 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1370 spin_unlock_irqrestore(&dwc->lock, flags);
1371
1372 return ret;
1373 }
1374
1375 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1376 {
1377 struct dwc3_ep *dep = to_dwc3_ep(ep);
1378 struct dwc3 *dwc = dep->dwc;
1379 unsigned long flags;
1380 int ret;
1381
1382 spin_lock_irqsave(&dwc->lock, flags);
1383 dep->flags |= DWC3_EP_WEDGE;
1384
1385 if (dep->number == 0 || dep->number == 1)
1386 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1387 else
1388 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1389 spin_unlock_irqrestore(&dwc->lock, flags);
1390
1391 return ret;
1392 }
1393
1394 /* -------------------------------------------------------------------------- */
1395
1396 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1397 .bLength = USB_DT_ENDPOINT_SIZE,
1398 .bDescriptorType = USB_DT_ENDPOINT,
1399 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1400 };
1401
1402 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1403 .enable = dwc3_gadget_ep0_enable,
1404 .disable = dwc3_gadget_ep0_disable,
1405 .alloc_request = dwc3_gadget_ep_alloc_request,
1406 .free_request = dwc3_gadget_ep_free_request,
1407 .queue = dwc3_gadget_ep0_queue,
1408 .dequeue = dwc3_gadget_ep_dequeue,
1409 .set_halt = dwc3_gadget_ep0_set_halt,
1410 .set_wedge = dwc3_gadget_ep_set_wedge,
1411 };
1412
1413 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1414 .enable = dwc3_gadget_ep_enable,
1415 .disable = dwc3_gadget_ep_disable,
1416 .alloc_request = dwc3_gadget_ep_alloc_request,
1417 .free_request = dwc3_gadget_ep_free_request,
1418 .queue = dwc3_gadget_ep_queue,
1419 .dequeue = dwc3_gadget_ep_dequeue,
1420 .set_halt = dwc3_gadget_ep_set_halt,
1421 .set_wedge = dwc3_gadget_ep_set_wedge,
1422 };
1423
1424 /* -------------------------------------------------------------------------- */
1425
1426 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1427 {
1428 struct dwc3 *dwc = gadget_to_dwc(g);
1429 u32 reg;
1430
1431 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1432 return DWC3_DSTS_SOFFN(reg);
1433 }
1434
1435 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1436 {
1437 int retries;
1438
1439 int ret;
1440 u32 reg;
1441
1442 u8 link_state;
1443 u8 speed;
1444
1445 /*
1446 	 * According to the Databook, a Remote Wakeup request should
1447 	 * be issued only when the device is in the Early Suspend state.
1448 *
1449 * We can check that via USB Link State bits in DSTS register.
1450 */
1451 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1452
1453 speed = reg & DWC3_DSTS_CONNECTSPD;
1454 if ((speed == DWC3_DSTS_SUPERSPEED) ||
1455 (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
1456 dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
1457 return 0;
1458 }
1459
1460 link_state = DWC3_DSTS_USBLNKST(reg);
1461
1462 switch (link_state) {
1463 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1464 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1465 break;
1466 default:
1467 dwc3_trace(trace_dwc3_gadget,
1468 "can't wakeup from '%s'",
1469 dwc3_gadget_link_string(link_state));
1470 return -EINVAL;
1471 }
1472
1473 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1474 if (ret < 0) {
1475 dev_err(dwc->dev, "failed to put link in Recovery\n");
1476 return ret;
1477 }
1478
1479 /* Recent versions do this automatically */
1480 if (dwc->revision < DWC3_REVISION_194A) {
1481 /* write zeroes to Link Change Request */
1482 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1483 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1484 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1485 }
1486
1487 /* poll until Link State changes to ON */
1488 retries = 20000;
1489
1490 while (retries--) {
1491 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1492
1493 /* in HS, means ON */
1494 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1495 break;
1496 }
1497
1498 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1499 dev_err(dwc->dev, "failed to send remote wakeup\n");
1500 return -EINVAL;
1501 }
1502
1503 return 0;
1504 }
1505
1506 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1507 {
1508 struct dwc3 *dwc = gadget_to_dwc(g);
1509 unsigned long flags;
1510 int ret;
1511
1512 spin_lock_irqsave(&dwc->lock, flags);
1513 ret = __dwc3_gadget_wakeup(dwc);
1514 spin_unlock_irqrestore(&dwc->lock, flags);
1515
1516 return ret;
1517 }
1518
1519 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1520 int is_selfpowered)
1521 {
1522 struct dwc3 *dwc = gadget_to_dwc(g);
1523 unsigned long flags;
1524
1525 spin_lock_irqsave(&dwc->lock, flags);
1526 g->is_selfpowered = !!is_selfpowered;
1527 spin_unlock_irqrestore(&dwc->lock, flags);
1528
1529 return 0;
1530 }
1531
1532 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1533 {
1534 u32 reg;
1535 u32 timeout = 500;
1536
1537 if (pm_runtime_suspended(dwc->dev))
1538 return 0;
1539
1540 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1541 if (is_on) {
1542 if (dwc->revision <= DWC3_REVISION_187A) {
1543 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1544 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1545 }
1546
1547 if (dwc->revision >= DWC3_REVISION_194A)
1548 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1549 reg |= DWC3_DCTL_RUN_STOP;
1550
1551 if (dwc->has_hibernation)
1552 reg |= DWC3_DCTL_KEEP_CONNECT;
1553
1554 dwc->pullups_connected = true;
1555 } else {
1556 reg &= ~DWC3_DCTL_RUN_STOP;
1557
1558 if (dwc->has_hibernation && !suspend)
1559 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1560
1561 dwc->pullups_connected = false;
1562 }
1563
1564 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1565
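	/* poll DSTS until the controller-halted bit matches the requested Run/Stop state */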
1566 do {
1567 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1568 reg &= DWC3_DSTS_DEVCTRLHLT;
1569 } while (--timeout && !(!is_on ^ !reg));
1570
1571 if (!timeout)
1572 return -ETIMEDOUT;
1573
1574 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1575 dwc->gadget_driver
1576 ? dwc->gadget_driver->function : "no-function",
1577 is_on ? "connect" : "disconnect");
1578
1579 return 0;
1580 }
1581
1582 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1583 {
1584 struct dwc3 *dwc = gadget_to_dwc(g);
1585 unsigned long flags;
1586 int ret;
1587
1588 is_on = !!is_on;
1589
1590 spin_lock_irqsave(&dwc->lock, flags);
1591 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1592 spin_unlock_irqrestore(&dwc->lock, flags);
1593
1594 return ret;
1595 }
1596
1597 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1598 {
1599 u32 reg;
1600
1601 /* Enable all but Start and End of Frame IRQs */
1602 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1603 DWC3_DEVTEN_EVNTOVERFLOWEN |
1604 DWC3_DEVTEN_CMDCMPLTEN |
1605 DWC3_DEVTEN_ERRTICERREN |
1606 DWC3_DEVTEN_WKUPEVTEN |
1607 DWC3_DEVTEN_ULSTCNGEN |
1608 DWC3_DEVTEN_CONNECTDONEEN |
1609 DWC3_DEVTEN_USBRSTEN |
1610 DWC3_DEVTEN_DISCONNEVTEN);
1611
1612 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1613 }
1614
1615 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1616 {
1617 /* mask all interrupts */
1618 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1619 }
1620
1621 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1622 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1623
1624 /**
1625 * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1626 * @dwc: pointer to our context structure
1627 *
1628 * The following looks complex but it's actually very simple. In order to
1629 * calculate the number of packets we can burst at once on OUT transfers, we're
1630 * gonna use RxFIFO size.
1631 *
1632 * To calculate RxFIFO size we need two numbers:
1633 * MDWIDTH = size, in bits, of the internal memory bus
1634 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1635 *
1636 * Given these two numbers, the formula is simple:
1637 *
1638 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1639 *
1640 * 24 bytes is for 3x SETUP packets
1641 * 16 bytes is a clock domain crossing tolerance
1642 *
1643 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
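 *
 * For example (values assumed purely for illustration): with MDWIDTH = 64
 * bits and RAM2_DEPTH = 1024, RxFIFO Size = (1024 * 64 / 8) - 24 - 16 =
 * 8152 bytes, so NUMP = 8152 / 1024 = 7.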
1644 */
1645 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1646 {
1647 u32 ram2_depth;
1648 u32 mdwidth;
1649 u32 nump;
1650 u32 reg;
1651
1652 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1653 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1654
1655 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1656 nump = min_t(u32, nump, 16);
1657
1658 /* update NumP */
1659 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1660 reg &= ~DWC3_DCFG_NUMP_MASK;
1661 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1662 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1663 }
1664
1665 static int __dwc3_gadget_start(struct dwc3 *dwc)
1666 {
1667 struct dwc3_ep *dep;
1668 int ret = 0;
1669 u32 reg;
1670
1671 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1672 reg &= ~(DWC3_DCFG_SPEED_MASK);
1673
1674 /**
1675 * WORKAROUND: DWC3 revision < 2.20a have an issue
1676 * which would cause metastability state on Run/Stop
1677 * bit if we try to force the IP to USB2-only mode.
1678 *
1679 * Because of that, we cannot configure the IP to any
1680 	 * speed other than SuperSpeed.
1681 *
1682 * Refers to:
1683 *
1684 * STAR#9000525659: Clock Domain Crossing on DCTL in
1685 * USB 2.0 Mode
1686 */
1687 if (dwc->revision < DWC3_REVISION_220A) {
1688 reg |= DWC3_DCFG_SUPERSPEED;
1689 } else {
1690 switch (dwc->maximum_speed) {
1691 case USB_SPEED_LOW:
1692 reg |= DWC3_DCFG_LOWSPEED;
1693 break;
1694 case USB_SPEED_FULL:
1695 reg |= DWC3_DCFG_FULLSPEED1;
1696 break;
1697 case USB_SPEED_HIGH:
1698 reg |= DWC3_DCFG_HIGHSPEED;
1699 break;
1700 case USB_SPEED_SUPER_PLUS:
1701 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
1702 break;
1703 default:
1704 dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1705 dwc->maximum_speed);
1706 /* fall through */
1707 case USB_SPEED_SUPER:
1708 reg |= DWC3_DCFG_SUPERSPEED;
1709 break;
1710 }
1711 }
1712 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1713
1714 /*
1715 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1716 * field instead of letting dwc3 itself calculate that automatically.
1717 *
1718 * This way, we maximize the chances that we'll be able to get several
1719 * bursts of data without going through any sort of endpoint throttling.
1720 */
1721 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1722 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1723 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1724
1725 dwc3_gadget_setup_nump(dwc);
1726
1727 /* Start with SuperSpeed Default */
1728 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1729
1730 dep = dwc->eps[0];
1731 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1732 false);
1733 if (ret) {
1734 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1735 goto err0;
1736 }
1737
1738 dep = dwc->eps[1];
1739 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1740 false);
1741 if (ret) {
1742 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1743 goto err1;
1744 }
1745
1746 /* begin to receive SETUP packets */
1747 dwc->ep0state = EP0_SETUP_PHASE;
1748 dwc3_ep0_out_start(dwc);
1749
1750 dwc3_gadget_enable_irq(dwc);
1751
1752 return 0;
1753
1754 err1:
1755 __dwc3_gadget_ep_disable(dwc->eps[0]);
1756
1757 err0:
1758 return ret;
1759 }
1760
1761 static int dwc3_gadget_start(struct usb_gadget *g,
1762 struct usb_gadget_driver *driver)
1763 {
1764 struct dwc3 *dwc = gadget_to_dwc(g);
1765 unsigned long flags;
1766 int ret = 0;
1767 int irq;
1768
1769 irq = dwc->irq_gadget;
1770 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1771 IRQF_SHARED, "dwc3", dwc->ev_buf);
1772 if (ret) {
1773 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1774 irq, ret);
1775 goto err0;
1776 }
1777
1778 spin_lock_irqsave(&dwc->lock, flags);
1779 if (dwc->gadget_driver) {
1780 dev_err(dwc->dev, "%s is already bound to %s\n",
1781 dwc->gadget.name,
1782 dwc->gadget_driver->driver.name);
1783 ret = -EBUSY;
1784 goto err1;
1785 }
1786
1787 dwc->gadget_driver = driver;
1788
1789 if (pm_runtime_active(dwc->dev))
1790 __dwc3_gadget_start(dwc);
1791
1792 spin_unlock_irqrestore(&dwc->lock, flags);
1793
1794 return 0;
1795
1796 err1:
1797 spin_unlock_irqrestore(&dwc->lock, flags);
1798 free_irq(irq, dwc);
1799
1800 err0:
1801 return ret;
1802 }
1803
1804 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1805 {
1806 if (pm_runtime_suspended(dwc->dev))
1807 return;
1808
1809 dwc3_gadget_disable_irq(dwc);
1810 __dwc3_gadget_ep_disable(dwc->eps[0]);
1811 __dwc3_gadget_ep_disable(dwc->eps[1]);
1812 }
1813
1814 static int dwc3_gadget_stop(struct usb_gadget *g)
1815 {
1816 struct dwc3 *dwc = gadget_to_dwc(g);
1817 unsigned long flags;
1818
1819 spin_lock_irqsave(&dwc->lock, flags);
1820 __dwc3_gadget_stop(dwc);
1821 dwc->gadget_driver = NULL;
1822 spin_unlock_irqrestore(&dwc->lock, flags);
1823
1824 free_irq(dwc->irq_gadget, dwc->ev_buf);
1825
1826 return 0;
1827 }
1828
1829 static const struct usb_gadget_ops dwc3_gadget_ops = {
1830 .get_frame = dwc3_gadget_get_frame,
1831 .wakeup = dwc3_gadget_wakeup,
1832 .set_selfpowered = dwc3_gadget_set_selfpowered,
1833 .pullup = dwc3_gadget_pullup,
1834 .udc_start = dwc3_gadget_start,
1835 .udc_stop = dwc3_gadget_stop,
1836 };
1837
1838 /* -------------------------------------------------------------------------- */
1839
1840 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1841 u8 num, u32 direction)
1842 {
1843 struct dwc3_ep *dep;
1844 u8 i;
1845
1846 for (i = 0; i < num; i++) {
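		/* physical endpoint numbers interleave directions: even numbers are OUT, odd numbers are IN */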
1847 u8 epnum = (i << 1) | (direction ? 1 : 0);
1848
1849 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1850 if (!dep)
1851 return -ENOMEM;
1852
1853 dep->dwc = dwc;
1854 dep->number = epnum;
1855 dep->direction = !!direction;
1856 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
1857 dwc->eps[epnum] = dep;
1858
1859 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1860 (epnum & 1) ? "in" : "out");
1861
1862 dep->endpoint.name = dep->name;
1863 spin_lock_init(&dep->lock);
1864
1865 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1866
1867 if (epnum == 0 || epnum == 1) {
1868 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1869 dep->endpoint.maxburst = 1;
1870 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1871 if (!epnum)
1872 dwc->gadget.ep0 = &dep->endpoint;
1873 } else {
1874 int ret;
1875
1876 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1877 dep->endpoint.max_streams = 15;
1878 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1879 list_add_tail(&dep->endpoint.ep_list,
1880 &dwc->gadget.ep_list);
1881
1882 ret = dwc3_alloc_trb_pool(dep);
1883 if (ret)
1884 return ret;
1885 }
1886
1887 if (epnum == 0 || epnum == 1) {
1888 dep->endpoint.caps.type_control = true;
1889 } else {
1890 dep->endpoint.caps.type_iso = true;
1891 dep->endpoint.caps.type_bulk = true;
1892 dep->endpoint.caps.type_int = true;
1893 }
1894
1895 dep->endpoint.caps.dir_in = !!direction;
1896 dep->endpoint.caps.dir_out = !direction;
1897
1898 INIT_LIST_HEAD(&dep->pending_list);
1899 INIT_LIST_HEAD(&dep->started_list);
1900 }
1901
1902 return 0;
1903 }
1904
1905 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1906 {
1907 int ret;
1908
1909 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1910
1911 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1912 if (ret < 0) {
1913 dwc3_trace(trace_dwc3_gadget,
1914 "failed to allocate OUT endpoints");
1915 return ret;
1916 }
1917
1918 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1919 if (ret < 0) {
1920 dwc3_trace(trace_dwc3_gadget,
1921 "failed to allocate IN endpoints");
1922 return ret;
1923 }
1924
1925 return 0;
1926 }
1927
1928 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1929 {
1930 struct dwc3_ep *dep;
1931 u8 epnum;
1932
1933 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1934 dep = dwc->eps[epnum];
1935 if (!dep)
1936 continue;
1937 /*
1938 * Physical endpoints 0 and 1 are special; they form the
1939 * bi-directional USB endpoint 0.
1940 *
1941 * For those two physical endpoints, we don't allocate a TRB
1942 * pool nor do we add them the endpoints list. Due to that, we
1943 * shouldn't do these two operations otherwise we would end up
1944 * with all sorts of bugs when removing dwc3.ko.
1945 */
1946 if (epnum != 0 && epnum != 1) {
1947 dwc3_free_trb_pool(dep);
1948 list_del(&dep->endpoint.ep_list);
1949 }
1950
1951 kfree(dep);
1952 }
1953 }
1954
1955 /* -------------------------------------------------------------------------- */
1956
1957 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1958 struct dwc3_request *req, struct dwc3_trb *trb,
1959 const struct dwc3_event_depevt *event, int status,
1960 int chain)
1961 {
1962 unsigned int count;
1963 unsigned int s_pkt = 0;
1964 unsigned int trb_status;
1965
1966 dep->queued_requests--;
1967 trace_dwc3_complete_trb(dep, trb);
1968
1969 /*
1970 * If we're in the middle of series of chained TRBs and we
1971 * receive a short transfer along the way, DWC3 will skip
1972 * through all TRBs including the last TRB in the chain (the
1973 * where CHN bit is zero. DWC3 will also avoid clearing HWO
1974 * bit and SW has to do it manually.
1975 *
1976 * We're going to do that here to avoid problems of HW trying
1977 * to use bogus TRBs for transfers.
1978 */
1979 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
1980 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1981
1982 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1983 return 1;
1984
1985 count = trb->size & DWC3_TRB_SIZE_MASK;
1986
1987 if (dep->direction) {
1988 if (count) {
1989 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1990 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1991 dwc3_trace(trace_dwc3_gadget,
1992 "%s: incomplete IN transfer",
1993 dep->name);
1994 /*
1995 				 * If a missed isoc occurred and there is
1996 				 * no request queued, then issue END
1997 				 * TRANSFER so that the core generates the
1998 				 * next xfernotready and we will issue
1999 				 * a fresh START TRANSFER.
2000 				 * If there are still queued requests,
2001 				 * then wait; do not issue either END
2002 				 * or UPDATE TRANSFER, just attach the next
2003 				 * request in pending_list during
2004 				 * giveback. If any future queued request
2005 				 * is successfully transferred, then we
2006 				 * will issue UPDATE TRANSFER for all
2007 				 * requests in the pending_list.
2008 */
2009 dep->flags |= DWC3_EP_MISSED_ISOC;
2010 } else {
2011 dev_err(dwc->dev, "incomplete IN transfer %s\n",
2012 dep->name);
2013 status = -ECONNRESET;
2014 }
2015 } else {
2016 dep->flags &= ~DWC3_EP_MISSED_ISOC;
2017 }
2018 } else {
2019 if (count && (event->status & DEPEVT_STATUS_SHORT))
2020 s_pkt = 1;
2021 }
2022
2023 if (s_pkt && !chain)
2024 return 1;
2025 if ((event->status & DEPEVT_STATUS_LST) &&
2026 (trb->ctrl & (DWC3_TRB_CTRL_LST |
2027 DWC3_TRB_CTRL_HWO)))
2028 return 1;
2029 if ((event->status & DEPEVT_STATUS_IOC) &&
2030 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2031 return 1;
2032 return 0;
2033 }
2034
2035 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2036 const struct dwc3_event_depevt *event, int status)
2037 {
2038 struct dwc3_request *req;
2039 struct dwc3_trb *trb;
2040 unsigned int slot;
2041 unsigned int i;
2042 int count = 0;
2043 int ret;
2044
2045 do {
2046 int chain;
2047
2048 req = next_request(&dep->started_list);
2049 if (WARN_ON_ONCE(!req))
2050 return 1;
2051
2052 chain = req->request.num_mapped_sgs > 0;
2053 i = 0;
2054 do {
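			/* walk the TRBs for this request, skipping the link TRB at the end of the ring */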
2055 slot = req->first_trb_index + i;
2056 if (slot == DWC3_TRB_NUM - 1)
2057 slot++;
2058 slot %= DWC3_TRB_NUM;
2059 trb = &dep->trb_pool[slot];
2060 count += trb->size & DWC3_TRB_SIZE_MASK;
2061
2062 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2063 event, status, chain);
2064 if (ret)
2065 break;
2066 } while (++i < req->request.num_mapped_sgs);
2067
2068 /*
2069 * We assume here that we will always receive the entire data
2070 * block we are supposed to receive. Meaning, if we program RX to
2071 * receive 4K but we receive only 2K, we assume that's all we
2072 * should receive and simply bounce the request back to the
2073 * gadget driver for further processing.
2074 */
2075 req->request.actual += req->request.length - count;
2076 dwc3_gadget_giveback(dep, req, status);
2077
2078 if (ret)
2079 break;
2080 } while (1);
2081
2082 /*
2083 * Our endpoint might get disabled by another thread during
2084 * dwc3_gadget_giveback(). If that happens, we just return 1
2085 * early so the DWC3_EP_BUSY flag gets cleared.
2086 */
2087 if (!dep->endpoint.desc)
2088 return 1;
2089
2090 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2091 list_empty(&dep->started_list)) {
2092 if (list_empty(&dep->pending_list)) {
2093 /*
2094 * If there is no entry in the request list, then do
2095 * not issue END TRANSFER now. Just set the PENDING
2096 * flag, so that END TRANSFER is issued when an
2097 * entry is added to the request list.
2098 */
2099 dep->flags = DWC3_EP_PENDING_REQUEST;
2100 } else {
2101 dwc3_stop_active_transfer(dwc, dep->number, true);
2102 dep->flags = DWC3_EP_ENABLED;
2103 }
2104 return 1;
2105 }
2106
2107 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2108 if ((event->status & DEPEVT_STATUS_IOC) &&
2109 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2110 return 0;
2111 return 1;
2112 }
2113
2114 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2115 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2116 {
2117 unsigned status = 0;
2118 int clean_busy;
2119 u32 is_xfer_complete;
2120
2121 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2122
2123 if (event->status & DEPEVT_STATUS_BUSERR)
2124 status = -ECONNRESET;
2125
2126 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2127 if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2128 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2129 dep->flags &= ~DWC3_EP_BUSY;
2130
2131 /*
2132 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2133 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2134 */
2135 if (dwc->revision < DWC3_REVISION_183A) {
2136 u32 reg;
2137 int i;
2138
2139 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2140 dep = dwc->eps[i];
2141
2142 if (!(dep->flags & DWC3_EP_ENABLED))
2143 continue;
2144
2145 if (!list_empty(&dep->started_list))
2146 return;
2147 }
2148
2149 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2150 reg |= dwc->u1u2;
2151 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2152
2153 dwc->u1u2 = 0;
2154 }
2155
2156 /*
2157 * Our endpoint might get disabled by another thread during
2158 * dwc3_gadget_giveback(). If that happens, we just return
2159 * early so the DWC3_EP_BUSY flag gets cleared.
2160 */
2161 if (!dep->endpoint.desc)
2162 return;
2163
2164 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2165 int ret;
2166
2167 ret = __dwc3_gadget_kick_transfer(dep, 0);
2168 if (!ret || ret == -EBUSY)
2169 return;
2170 }
2171 }
2172
2173 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2174 const struct dwc3_event_depevt *event)
2175 {
2176 struct dwc3_ep *dep;
2177 u8 epnum = event->endpoint_number;
2178
2179 dep = dwc->eps[epnum];
2180
2181 if (!(dep->flags & DWC3_EP_ENABLED))
2182 return;
2183
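/*
 * Physical endpoints 0 and 1 back the control endpoint, so route
 * their events to the dedicated ep0 handler.
 */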
2184 if (epnum == 0 || epnum == 1) {
2185 dwc3_ep0_interrupt(dwc, event);
2186 return;
2187 }
2188
2189 switch (event->endpoint_event) {
2190 case DWC3_DEPEVT_XFERCOMPLETE:
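/*
 * XferComplete means the transfer resource has been released, so
 * drop the cached resource index (assumed to be the index returned
 * by the last Start Transfer command).
 */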
2191 dep->resource_index = 0;
2192
2193 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2194 dwc3_trace(trace_dwc3_gadget,
2195 "%s is an Isochronous endpoint",
2196 dep->name);
2197 return;
2198 }
2199
2200 dwc3_endpoint_transfer_complete(dwc, dep, event);
2201 break;
2202 case DWC3_DEPEVT_XFERINPROGRESS:
2203 dwc3_endpoint_transfer_complete(dwc, dep, event);
2204 break;
2205 case DWC3_DEPEVT_XFERNOTREADY:
2206 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2207 dwc3_gadget_start_isoc(dwc, dep, event);
2208 } else {
2209 int active;
2210 int ret;
2211
2212 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2213
2214 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2215 dep->name, active ? "Transfer Active"
2216 : "Transfer Not Active");
2217
2218 ret = __dwc3_gadget_kick_transfer(dep, 0);
2219 if (!ret || ret == -EBUSY)
2220 return;
2221
2222 dwc3_trace(trace_dwc3_gadget,
2223 "%s: failed to kick transfers",
2224 dep->name);
2225 }
2226
2227 break;
2228 case DWC3_DEPEVT_STREAMEVT:
2229 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2230 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2231 dep->name);
2232 return;
2233 }
2234
2235 switch (event->status) {
2236 case DEPEVT_STREAMEVT_FOUND:
2237 dwc3_trace(trace_dwc3_gadget,
2238 "Stream %d found and started",
2239 event->parameters);
2240
2241 break;
2242 case DEPEVT_STREAMEVT_NOTFOUND:
2243 /* FALLTHROUGH */
2244 default:
2245 dwc3_trace(trace_dwc3_gadget,
2246 "unable to find suitable stream");
2247 }
2248 break;
2249 case DWC3_DEPEVT_RXTXFIFOEVT:
2250 dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
2251 break;
2252 case DWC3_DEPEVT_EPCMDCMPLT:
2253 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2254 break;
2255 }
2256 }
2257
2258 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2259 {
2260 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2261 spin_unlock(&dwc->lock);
2262 dwc->gadget_driver->disconnect(&dwc->gadget);
2263 spin_lock(&dwc->lock);
2264 }
2265 }
2266
2267 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2268 {
2269 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2270 spin_unlock(&dwc->lock);
2271 dwc->gadget_driver->suspend(&dwc->gadget);
2272 spin_lock(&dwc->lock);
2273 }
2274 }
2275
2276 static void dwc3_resume_gadget(struct dwc3 *dwc)
2277 {
2278 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2279 spin_unlock(&dwc->lock);
2280 dwc->gadget_driver->resume(&dwc->gadget);
2281 spin_lock(&dwc->lock);
2282 }
2283 }
2284
2285 static void dwc3_reset_gadget(struct dwc3 *dwc)
2286 {
2287 if (!dwc->gadget_driver)
2288 return;
2289
2290 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2291 spin_unlock(&dwc->lock);
2292 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2293 spin_lock(&dwc->lock);
2294 }
2295 }
2296
2297 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2298 {
2299 struct dwc3_ep *dep;
2300 struct dwc3_gadget_ep_cmd_params params;
2301 u32 cmd;
2302 int ret;
2303
2304 dep = dwc->eps[epnum];
2305
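/* Nothing to stop if no transfer resource is currently assigned. */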
2306 if (!dep->resource_index)
2307 return;
2308
2309 /*
2310 * NOTICE: We are violating what the Databook says about the
2311 * EndTransfer command. Ideally we would _always_ wait for the
2312 * EndTransfer Command Completion IRQ, but that's causing too
2313 * much trouble synchronizing between us and gadget driver.
2314 *
2315 * We have discussed this with the IP Provider and it was
2316 * suggested to giveback all requests here, but give HW some
2317 * extra time to synchronize with the interconnect. We're using
2318 * an arbitrary 100us delay for that.
2319 *
2320 * Note also that a similar handling was tested by Synopsys
2321 * (thanks a lot Paul) and nothing bad has come out of it.
2322 * In short, what we're doing is:
2323 *
2324 * - Issue EndTransfer WITH CMDIOC bit set
2325 * - Wait 100us
2326 */
2327
2328 cmd = DWC3_DEPCMD_ENDTRANSFER;
2329 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2330 cmd |= DWC3_DEPCMD_CMDIOC;
2331 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2332 memset(&params, 0, sizeof(params));
2333 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2334 WARN_ON_ONCE(ret);
2335 dep->resource_index = 0;
2336 dep->flags &= ~DWC3_EP_BUSY;
2337 udelay(100);
2338 }
2339
2340 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2341 {
2342 u32 epnum;
2343
2344 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2345 struct dwc3_ep *dep;
2346
2347 dep = dwc->eps[epnum];
2348 if (!dep)
2349 continue;
2350
2351 if (!(dep->flags & DWC3_EP_ENABLED))
2352 continue;
2353
2354 dwc3_remove_requests(dwc, dep);
2355 }
2356 }
2357
2358 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2359 {
2360 u32 epnum;
2361
2362 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2363 struct dwc3_ep *dep;
2364 int ret;
2365
2366 dep = dwc->eps[epnum];
2367 if (!dep)
2368 continue;
2369
2370 if (!(dep->flags & DWC3_EP_STALL))
2371 continue;
2372
2373 dep->flags &= ~DWC3_EP_STALL;
2374
2375 ret = dwc3_send_clear_stall_ep_cmd(dep);
2376 WARN_ON_ONCE(ret);
2377 }
2378 }
2379
2380 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2381 {
2382 u32 reg;
2383
2384 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2385 reg &= ~DWC3_DCTL_INITU1ENA;
2386 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2387
2388 reg &= ~DWC3_DCTL_INITU2ENA;
2389 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2390
2391 dwc3_disconnect_gadget(dwc);
2392
2393 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2394 dwc->setup_packet_pending = false;
2395 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2396
2397 dwc->connected = false;
2398 }
2399
2400 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2401 {
2402 u32 reg;
2403
2404 dwc->connected = true;
2405
2406 /*
2407 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2408 * would cause a missing Disconnect Event if there's a
2409 * pending Setup Packet in the FIFO.
2410 *
2411 * There's no suggested workaround on the official Bug
2412 * report, which states that "unless the driver/application
2413 * is doing any special handling of a disconnect event,
2414 * there is no functional issue".
2415 *
2416 * Unfortunately, it turns out that we _do_ some special
2417 * handling of a disconnect event, namely complete all
2418 * pending transfers, notify gadget driver of the
2419 * disconnection, and so on.
2420 *
2421 * Our suggested workaround is to follow the Disconnect
2422 * Event steps here, instead, based on a setup_packet_pending
2423 * flag. This flag gets set whenever we have a SETUP_PENDING
2424 * status for EP0 TRBs and gets cleared on XferComplete for the
2425 * same endpoint.
2426 *
2427 * Refers to:
2428 *
2429 * STAR#9000466709: RTL: Device : Disconnect event not
2430 * generated if setup packet pending in FIFO
2431 */
2432 if (dwc->revision < DWC3_REVISION_188A) {
2433 if (dwc->setup_packet_pending)
2434 dwc3_gadget_disconnect_interrupt(dwc);
2435 }
2436
2437 dwc3_reset_gadget(dwc);
2438
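/* Make sure we leave any USB2 test mode we might have been in. */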
2439 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2440 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2441 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2442 dwc->test_mode = false;
2443
2444 dwc3_stop_active_transfers(dwc);
2445 dwc3_clear_stall_all_ep(dwc);
2446
2447 /* Reset device address to zero */
2448 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2449 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2450 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2451 }
2452
2453 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2454 {
2455 u32 reg;
2456 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2457
2458 /*
2459 * We change the clock only at SS, but I don't know why I would want
2460 * to do this. Maybe it becomes part of the power saving plan.
2461 */
2462
2463 if ((speed != DWC3_DSTS_SUPERSPEED) &&
2464 (speed != DWC3_DSTS_SUPERSPEED_PLUS))
2465 return;
2466
2467 /*
2468 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2469 * each time on Connect Done.
2470 */
2471 if (!usb30_clock)
2472 return;
2473
2474 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2475 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2476 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2477 }
2478
2479 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2480 {
2481 struct dwc3_ep *dep;
2482 int ret;
2483 u32 reg;
2484 u8 speed;
2485
2486 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2487 speed = reg & DWC3_DSTS_CONNECTSPD;
2488 dwc->speed = speed;
2489
2490 dwc3_update_ram_clk_sel(dwc, speed);
2491
2492 switch (speed) {
2493 case DWC3_DSTS_SUPERSPEED_PLUS:
2494 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2495 dwc->gadget.ep0->maxpacket = 512;
2496 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2497 break;
2498 case DWC3_DSTS_SUPERSPEED:
2499 /*
2500 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2501 * would cause a missing USB3 Reset event.
2502 *
2503 * In such situations, we should force a USB3 Reset
2504 * event by calling our dwc3_gadget_reset_interrupt()
2505 * routine.
2506 *
2507 * Refers to:
2508 *
2509 * STAR#9000483510: RTL: SS : USB3 reset event may
2510 * not be generated always when the link enters poll
2511 */
2512 if (dwc->revision < DWC3_REVISION_190A)
2513 dwc3_gadget_reset_interrupt(dwc);
2514
2515 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2516 dwc->gadget.ep0->maxpacket = 512;
2517 dwc->gadget.speed = USB_SPEED_SUPER;
2518 break;
2519 case DWC3_DSTS_HIGHSPEED:
2520 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2521 dwc->gadget.ep0->maxpacket = 64;
2522 dwc->gadget.speed = USB_SPEED_HIGH;
2523 break;
2524 case DWC3_DSTS_FULLSPEED2:
2525 case DWC3_DSTS_FULLSPEED1:
2526 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2527 dwc->gadget.ep0->maxpacket = 64;
2528 dwc->gadget.speed = USB_SPEED_FULL;
2529 break;
2530 case DWC3_DSTS_LOWSPEED:
2531 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2532 dwc->gadget.ep0->maxpacket = 8;
2533 dwc->gadget.speed = USB_SPEED_LOW;
2534 break;
2535 }
2536
2537 /* Enable USB2 LPM Capability */
2538
2539 if ((dwc->revision > DWC3_REVISION_194A) &&
2540 (speed != DWC3_DSTS_SUPERSPEED) &&
2541 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2542 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2543 reg |= DWC3_DCFG_LPM_CAP;
2544 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2545
2546 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2547 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2548
2549 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2550
2551 /*
2552 * On dwc3 revisions >= 2.40a, when LPM Erratum is enabled and
2553 * DCFG.LPMCap is set, the core responds with an ACK if the
2554 * BESL value in the LPM token is less than or equal to the LPM
2555 * NYET threshold.
2556 */
2557 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2558 && dwc->has_lpm_erratum,
2559 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2560
2561 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2562 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2563
2564 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2565 } else {
2566 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2567 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2568 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2569 }
2570
2571 dep = dwc->eps[0];
2572 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2573 false);
2574 if (ret) {
2575 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2576 return;
2577 }
2578
2579 dep = dwc->eps[1];
2580 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2581 false);
2582 if (ret) {
2583 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2584 return;
2585 }
2586
2587 /*
2588 * Configure PHY via GUSB3PIPECTLn if required.
2589 *
2590 * Update GTXFIFOSIZn
2591 *
2592 * In both cases reset values should be sufficient.
2593 */
2594 }
2595
2596 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2597 {
2598 /*
2599 * TODO take core out of low power mode when that's
2600 * implemented.
2601 */
2602
2603 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2604 spin_unlock(&dwc->lock);
2605 dwc->gadget_driver->resume(&dwc->gadget);
2606 spin_lock(&dwc->lock);
2607 }
2608 }
2609
2610 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2611 unsigned int evtinfo)
2612 {
2613 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2614 unsigned int pwropt;
2615
2616 /*
2617 * WORKAROUND: DWC3 < 2.50a have an issue which, when configured
2618 * without Hibernation mode enabled, shows up when the device
2619 * detects a host-initiated U3 exit.
2620 *
2621 * In that case, the device will generate a Link State Change
2622 * Interrupt from U3 to RESUME which is only necessary if
2623 * Hibernation is configured in.
2624 *
2625 * There are no functional changes due to such spurious event and we
2626 * just need to ignore it.
2627 *
2628 * Refers to:
2629 *
2630 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2631 * operational mode
2632 */
2633 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2634 if ((dwc->revision < DWC3_REVISION_250A) &&
2635 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2636 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2637 (next == DWC3_LINK_STATE_RESUME)) {
2638 dwc3_trace(trace_dwc3_gadget,
2639 "ignoring transition U3 -> Resume");
2640 return;
2641 }
2642 }
2643
2644 /*
2645 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2646 * on the link partner, the USB session might do multiple entry/exit
2647 * of low power states before a transfer takes place.
2648 *
2649 * Due to this problem, we might experience lower throughput. The
2650 * suggested workaround is to disable DCTL[12:9] bits if we're
2651 * transitioning from U1/U2 to U0 and enable those bits again
2652 * after a transfer completes and there are no pending transfers
2653 * on any of the enabled endpoints.
2654 *
2655 * This is the first half of that workaround.
2656 *
2657 * Refers to:
2658 *
2659 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2660 * core send LGO_Ux entering U0
2661 */
2662 if (dwc->revision < DWC3_REVISION_183A) {
2663 if (next == DWC3_LINK_STATE_U0) {
2664 u32 u1u2;
2665 u32 reg;
2666
2667 switch (dwc->link_state) {
2668 case DWC3_LINK_STATE_U1:
2669 case DWC3_LINK_STATE_U2:
2670 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2671 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2672 | DWC3_DCTL_ACCEPTU2ENA
2673 | DWC3_DCTL_INITU1ENA
2674 | DWC3_DCTL_ACCEPTU1ENA);
2675
2676 if (!dwc->u1u2)
2677 dwc->u1u2 = reg & u1u2;
2678
2679 reg &= ~u1u2;
2680
2681 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2682 break;
2683 default:
2684 /* do nothing */
2685 break;
2686 }
2687 }
2688 }
2689
2690 switch (next) {
2691 case DWC3_LINK_STATE_U1:
2692 if (dwc->speed == USB_SPEED_SUPER)
2693 dwc3_suspend_gadget(dwc);
2694 break;
2695 case DWC3_LINK_STATE_U2:
2696 case DWC3_LINK_STATE_U3:
2697 dwc3_suspend_gadget(dwc);
2698 break;
2699 case DWC3_LINK_STATE_RESUME:
2700 dwc3_resume_gadget(dwc);
2701 break;
2702 default:
2703 /* do nothing */
2704 break;
2705 }
2706
2707 dwc->link_state = next;
2708 }
2709
2710 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
2711 unsigned int evtinfo)
2712 {
2713 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2714
2715 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
2716 dwc3_suspend_gadget(dwc);
2717
2718 dwc->link_state = next;
2719 }
2720
2721 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2722 unsigned int evtinfo)
2723 {
2724 unsigned int is_ss = evtinfo & BIT(4);
2725
2726 /*
2727 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2728 * has a known issue which can cause USB CV TD.9.23 to fail
2729 * randomly.
2730 *
2731 * Because of this issue, core could generate bogus hibernation
2732 * events which SW needs to ignore.
2733 *
2734 * Refers to:
2735 *
2736 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2737 * Device Fallback from SuperSpeed
2738 */
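/*
 * Only act on the event when its SuperSpeed flag matches our current
 * connection speed; otherwise treat it as one of the bogus events
 * described above and ignore it.
 */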
2739 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2740 return;
2741
2742 /* enter hibernation here */
2743 }
2744
2745 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2746 const struct dwc3_event_devt *event)
2747 {
2748 switch (event->type) {
2749 case DWC3_DEVICE_EVENT_DISCONNECT:
2750 dwc3_gadget_disconnect_interrupt(dwc);
2751 break;
2752 case DWC3_DEVICE_EVENT_RESET:
2753 dwc3_gadget_reset_interrupt(dwc);
2754 break;
2755 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2756 dwc3_gadget_conndone_interrupt(dwc);
2757 break;
2758 case DWC3_DEVICE_EVENT_WAKEUP:
2759 dwc3_gadget_wakeup_interrupt(dwc);
2760 break;
2761 case DWC3_DEVICE_EVENT_HIBER_REQ:
2762 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2763 "unexpected hibernation event\n"))
2764 break;
2765
2766 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2767 break;
2768 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2769 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2770 break;
2771 case DWC3_DEVICE_EVENT_EOPF:
2772 /* This became a Suspend event for version 2.30a and above */
2773 if (dwc->revision < DWC3_REVISION_230A) {
2774 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2775 } else {
2776 dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
2777
2778 /*
2779 * Ignore suspend event until the gadget enters into
2780 * USB_STATE_CONFIGURED state.
2781 */
2782 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
2783 dwc3_gadget_suspend_interrupt(dwc,
2784 event->event_info);
2785 }
2786 break;
2787 case DWC3_DEVICE_EVENT_SOF:
2788 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2789 break;
2790 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2791 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2792 break;
2793 case DWC3_DEVICE_EVENT_CMD_CMPL:
2794 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2795 break;
2796 case DWC3_DEVICE_EVENT_OVERFLOW:
2797 dwc3_trace(trace_dwc3_gadget, "Overflow");
2798 break;
2799 default:
2800 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2801 }
2802 }
2803
2804 static void dwc3_process_event_entry(struct dwc3 *dwc,
2805 const union dwc3_event *event)
2806 {
2807 trace_dwc3_event(event->raw);
2808
2809 /* Endpoint IRQ, handle it and return early */
2810 if (event->type.is_devspec == 0) {
2811 /* depevt */
2812 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2813 }
2814
2815 switch (event->type.type) {
2816 case DWC3_EVENT_TYPE_DEV:
2817 dwc3_gadget_interrupt(dwc, &event->devt);
2818 break;
2819 /* REVISIT what to do with Carkit and I2C events ? */
2820 default:
2821 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2822 }
2823 }
2824
2825 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
2826 {
2827 struct dwc3 *dwc = evt->dwc;
2828 irqreturn_t ret = IRQ_NONE;
2829 int left;
2830 u32 reg;
2831
2832 left = evt->count;
2833
2834 if (!(evt->flags & DWC3_EVENT_PENDING))
2835 return IRQ_NONE;
2836
2837 while (left > 0) {
2838 union dwc3_event event;
2839
2840 event.raw = *(u32 *) (evt->buf + evt->lpos);
2841
2842 dwc3_process_event_entry(dwc, &event);
2843
2844 /*
2845 * FIXME we wrap around correctly to the next entry as
2846 * almost all entries are 4 bytes in size. There is one
2847 * entry which is 12 bytes: a regular entry
2848 * followed by 8 bytes of data. ATM I don't know how
2849 * things are organized if we get close to such a
2850 * boundary, so I'll worry about that once we try to
2851 * handle that case.
2852 */
2853 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2854 left -= 4;
2855
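/*
 * Acknowledge the 4 bytes we just consumed so the controller can
 * reuse that space in the event buffer.
 */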
2856 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
2857 }
2858
2859 evt->count = 0;
2860 evt->flags &= ~DWC3_EVENT_PENDING;
2861 ret = IRQ_HANDLED;
2862
2863 /* Unmask interrupt */
2864 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2865 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2866 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2867
2868 return ret;
2869 }
2870
2871 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
2872 {
2873 struct dwc3_event_buffer *evt = _evt;
2874 struct dwc3 *dwc = evt->dwc;
2875 unsigned long flags;
2876 irqreturn_t ret = IRQ_NONE;
2877
2878 spin_lock_irqsave(&dwc->lock, flags);
2879 ret = dwc3_process_event_buf(evt);
2880 spin_unlock_irqrestore(&dwc->lock, flags);
2881
2882 return ret;
2883 }
2884
2885 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
2886 {
2887 struct dwc3 *dwc = evt->dwc;
2888 u32 count;
2889 u32 reg;
2890
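/*
 * If the controller is runtime suspended we can't touch its
 * registers; note that events are pending and let
 * dwc3_gadget_process_pending_events() pick them up once the
 * device resumes.
 */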
2891 if (pm_runtime_suspended(dwc->dev)) {
2892 pm_runtime_get(dwc->dev);
2893 disable_irq_nosync(dwc->irq_gadget);
2894 dwc->pending_events = true;
2895 return IRQ_HANDLED;
2896 }
2897
2898 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2899 count &= DWC3_GEVNTCOUNT_MASK;
2900 if (!count)
2901 return IRQ_NONE;
2902
2903 evt->count = count;
2904 evt->flags |= DWC3_EVENT_PENDING;
2905
2906 /* Mask interrupt */
2907 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
2908 reg |= DWC3_GEVNTSIZ_INTMASK;
2909 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
2910
2911 return IRQ_WAKE_THREAD;
2912 }
2913
2914 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
2915 {
2916 struct dwc3_event_buffer *evt = _evt;
2917
2918 return dwc3_check_event_buf(evt);
2919 }
2920
2921 /**
2922 * dwc3_gadget_init - Initializes gadget related registers
2923 * @dwc: pointer to our controller context structure
2924 *
2925 * Returns 0 on success otherwise negative errno.
2926 */
2927 int dwc3_gadget_init(struct dwc3 *dwc)
2928 {
2929 int ret, irq;
2930 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
2931
2932 irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
2933 if (irq == -EPROBE_DEFER)
2934 return irq;
2935
2936 if (irq <= 0) {
2937 irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
2938 if (irq == -EPROBE_DEFER)
2939 return irq;
2940
2941 if (irq <= 0) {
2942 irq = platform_get_irq(dwc3_pdev, 0);
2943 if (irq <= 0) {
2944 if (irq != -EPROBE_DEFER) {
2945 dev_err(dwc->dev,
2946 "missing peripheral IRQ\n");
2947 }
2948 if (!irq)
2949 irq = -EINVAL;
2950 return irq;
2951 }
2952 }
2953 }
2954
2955 dwc->irq_gadget = irq;
2956
2957 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2958 &dwc->ctrl_req_addr, GFP_KERNEL);
2959 if (!dwc->ctrl_req) {
2960 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2961 ret = -ENOMEM;
2962 goto err0;
2963 }
2964
2965 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2966 &dwc->ep0_trb_addr, GFP_KERNEL);
2967 if (!dwc->ep0_trb) {
2968 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2969 ret = -ENOMEM;
2970 goto err1;
2971 }
2972
2973 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2974 if (!dwc->setup_buf) {
2975 ret = -ENOMEM;
2976 goto err2;
2977 }
2978
2979 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2980 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2981 GFP_KERNEL);
2982 if (!dwc->ep0_bounce) {
2983 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2984 ret = -ENOMEM;
2985 goto err3;
2986 }
2987
2988 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2989 if (!dwc->zlp_buf) {
2990 ret = -ENOMEM;
2991 goto err4;
2992 }
2993
2994 dwc->gadget.ops = &dwc3_gadget_ops;
2995 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2996 dwc->gadget.sg_supported = true;
2997 dwc->gadget.name = "dwc3-gadget";
2998 dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
2999
3000 /*
3001 * FIXME We might be setting max_speed to <SUPER, however versions
3002 * <2.20a of dwc3 have an issue with metastability (documented
3003 * elsewhere in this driver) which tells us we can't set max speed to
3004 * anything lower than SUPER.
3005 *
3006 * Because gadget.max_speed is only used by composite.c and function
3007 * drivers (i.e. it won't go into dwc3's registers), we are allowing
3008 * this to happen so we avoid sending a SuperSpeed Capability
3009 * descriptor together with our BOS descriptor, as that could confuse
3010 * the host into thinking we can handle super speed.
3011 *
3012 * Note that, in fact, we won't even support GetBOS requests when speed
3013 * is less than super speed because we don't have means, yet, to tell
3014 * composite.c that we are USB 2.0 + LPM ECN.
3015 */
3016 if (dwc->revision < DWC3_REVISION_220A)
3017 dwc3_trace(trace_dwc3_gadget,
3018 "Changing max_speed on rev %08x",
3019 dwc->revision);
3020
3021 dwc->gadget.max_speed = dwc->maximum_speed;
3022
3023 /*
3024 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
3025 * on ep out.
3026 */
3027 dwc->gadget.quirk_ep_out_aligned_size = true;
3028
3029 /*
3030 * REVISIT: Here we should clear all pending IRQs to be
3031 * sure we're starting from a well known location.
3032 */
3033
3034 ret = dwc3_gadget_init_endpoints(dwc);
3035 if (ret)
3036 goto err5;
3037
3038 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
3039 if (ret) {
3040 dev_err(dwc->dev, "failed to register udc\n");
3041 goto err5;
3042 }
3043
3044 return 0;
3045
3046 err5:
3047 kfree(dwc->zlp_buf);
3048
3049 err4:
3050 dwc3_gadget_free_endpoints(dwc);
3051 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3052 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3053
3054 err3:
3055 kfree(dwc->setup_buf);
3056
3057 err2:
3058 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3059 dwc->ep0_trb, dwc->ep0_trb_addr);
3060
3061 err1:
3062 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3063 dwc->ctrl_req, dwc->ctrl_req_addr);
3064
3065 err0:
3066 return ret;
3067 }
3068
3069 /* -------------------------------------------------------------------------- */
3070
3071 void dwc3_gadget_exit(struct dwc3 *dwc)
3072 {
3073 usb_del_gadget_udc(&dwc->gadget);
3074
3075 dwc3_gadget_free_endpoints(dwc);
3076
3077 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
3078 dwc->ep0_bounce, dwc->ep0_bounce_addr);
3079
3080 kfree(dwc->setup_buf);
3081 kfree(dwc->zlp_buf);
3082
3083 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3084 dwc->ep0_trb, dwc->ep0_trb_addr);
3085
3086 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3087 dwc->ctrl_req, dwc->ctrl_req_addr);
3088 }
3089
3090 int dwc3_gadget_suspend(struct dwc3 *dwc)
3091 {
3092 int ret;
3093
3094 if (!dwc->gadget_driver)
3095 return 0;
3096
3097 ret = dwc3_gadget_run_stop(dwc, false, false);
3098 if (ret < 0)
3099 return ret;
3100
3101 dwc3_disconnect_gadget(dwc);
3102 __dwc3_gadget_stop(dwc);
3103
3104 return 0;
3105 }
3106
3107 int dwc3_gadget_resume(struct dwc3 *dwc)
3108 {
3109 int ret;
3110
3111 if (!dwc->gadget_driver)
3112 return 0;
3113
3114 ret = __dwc3_gadget_start(dwc);
3115 if (ret < 0)
3116 goto err0;
3117
3118 ret = dwc3_gadget_run_stop(dwc, true, false);
3119 if (ret < 0)
3120 goto err1;
3121
3122 return 0;
3123
3124 err1:
3125 __dwc3_gadget_stop(dwc);
3126
3127 err0:
3128 return ret;
3129 }
3130
3131 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3132 {
3133 if (dwc->pending_events) {
3134 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3135 dwc->pending_events = false;
3136 enable_irq(dwc->irq_gadget);
3137 }
3138 }