/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}
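
/*
 * The frame list length (FRLISTEN_64_SIZE = 64) is a power of two, so the
 * modulo above reduces to a mask. For example, frame number 0x1C7 maps to
 * frame list entry 0x1C7 & 63 = 7.
 */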

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
		(qh->interval + 8 - 1) / 8 : qh->interval;
}
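
/*
 * For high speed, qh->interval is kept in microframes, so the increment
 * between serviced frame list entries is the interval rounded up to whole
 * frames: e.g. an interval of 16 uframes gives (16 + 7) / 8 = 2, i.e. the
 * endpoint is serviced in every second frame list entry. For lower speeds
 * the interval is already in frames and is used as is.
 */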

static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	qh->desc_list = dma_alloc_coherent(hsotg->dev,
					   sizeof(struct dwc2_hcd_dma_desc) *
					   dwc2_max_desc_num(qh),
					   &qh->desc_list_dma, flags);

	if (!qh->desc_list)
		return -ENOMEM;

	memset(qh->desc_list, 0,
	       sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));

	qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
	if (!qh->n_bytes) {
		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
				  * dwc2_max_desc_num(qh), qh->desc_list,
				  qh->desc_list_dma);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	if (qh->desc_list) {
		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
				  * dwc2_max_desc_num(qh), qh->desc_list,
				  qh->desc_list_dma);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

	hsotg->frame_list = dma_alloc_coherent(hsotg->dev,
					       4 * FRLISTEN_64_SIZE,
					       &hsotg->frame_list_dma,
					       mem_flags);
	if (!hsotg->frame_list)
		return -ENOMEM;

	memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	u32 *frame_list;
	dma_addr_t frame_list_dma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list = hsotg->frame_list;
	frame_list_dma = hsotg->frame_list_dma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
			  frame_list_dma);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = readl(hsotg->regs + HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = readl(hsotg->regs + HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}
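
/*
 * Worked example for the function above: for a channel with hc_num = 3 and
 * a servicing period of 8 frame list entries, bit 3 is set in entries
 * 0, 8, 16, ..., 56 of the 64-entry frame list. For the HS schinfo bitmap,
 * an interval of 2 uframes gives inc = (8 + 1) / 2 = 4 loop iterations
 * setting bits 0, 2, 4 and 6, i.e. schinfo = 0x55 - the channel is
 * scheduled in every other microframe.
 */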

static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh))
		hsotg->non_periodic_channels--;
	else
		dwc2_update_frame_list(hsotg, qh, 0);

	/*
	 * The condition is added to prevent a double cleanup attempt in
	 * case of device disconnect. See channel cleanup in
	 * dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is periodic
 * and the last one, frees the FrameList memory and disables periodic
 * scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned for some reason. This has been
	 * seen on an Isoc URB dequeue: the channel was halted but no
	 * subsequent ChHalted interrupt arrived to release it, so when this
	 * function is later reached from the endpoint disable routine the
	 * channel remains assigned.
	 */
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    !hsotg->periodic_channels && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}
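
/*
 * For HS each frame owns a set of 8 descriptors (one per microframe), so
 * the returned index is always a multiple of 8: e.g. frame_idx 5 maps to
 * descriptor index (5 & (MAX_DMA_DESC_NUM_HS_ISOC / 8 - 1)) * 8 = 40, the
 * start of the sixth descriptor set. For other speeds the frame index is
 * simply wrapped to the generic descriptor list size.
 */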

/*
 * Determine starting frame for Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always a frame number (not a uFrame), both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and it is a possible (though
	 * rare) case that the latest descriptor (considering rollback)
	 * corresponding to frame 2 will be serviced first. The HS case is
	 * more probable because, in fact, up to 11 uframes (16 in the code)
	 * may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter also, to start the transfer
		 * asap. If half of the frame has elapsed, skip 2 frames,
		 * otherwise just 1 frame. The starting descriptor index must
		 * be 8-aligned, so if the current frame is nearly complete
		 * the next one is skipped as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see the example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}
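
/*
 * In the HS branch above, the microframe number extracted from
 * hsotg->frame_number ranges over 0..7. At uframe 5 or later more than
 * half of the current frame has elapsed, so *skip_frames = 16 uframe
 * entries (two full frames) and programming starts two frames ahead;
 * earlier in the frame only one frame (8 entries) is skipped. Either way
 * the starting descriptor set stays 8-aligned.
 */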

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * the scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is being
	 * released when there are no more QTDs in the list (qh->ntd == 0).
	 * Thus this function is called only when qh->ntd == 0 and
	 * qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used; it is kept in the
	 * source file because it is required for another possible approach,
	 * which is: do not disable and release the channel when the ISOC
	 * session is completed, just move the QH to the inactive schedule
	 * until a new QTD arrives. On a new QTD, the QH is moved back to the
	 * 'ready' schedule, and the starting frame and therefore the starting
	 * desc_index are recalculated. In this case the channel is released
	 * only on ep_disable.
	 */

	/*
	 * Calculate the starting descriptor index. For INTERRUPT endpoints
	 * it is always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on the FrameList
		 * current bitmap and the servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}
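
/*
 * Example of the common (qh->channel == NULL) path above: for an FS
 * isochronous endpoint with a freshly calculated sched_frame of 0x1C7,
 * fr_idx = 0x1C7 & (FRLISTEN_64_SIZE - 1) = 7, and both td_first and
 * td_last start at descriptor index 7 & (MAX_DMA_DESC_NUM_GENERIC - 1).
 */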

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/*
	 * Set IOC for each descriptor corresponding to the last frame of the
	 * URB. The comparison is done after the index increment; otherwise
	 * it could never be true here, since the caller only fills
	 * descriptors while isoc_frame_index_last < packet_count.
	 */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc, ntd_max = 0;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			if (n_desc > 1)
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for the last descriptor if the descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
	}
#else
	/*
	 * Set the IOC bit only for one descriptor. Always try to be ahead of
	 * HW processing, i.e. on IOC generation the driver activates the next
	 * descriptor, but the core continues to process descriptors following
	 * the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but only one was queued at activation time. More than one
		 * QTD might actually be in the list if this function is
		 * called from XferCompletion - QTDs were queued during
		 * HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
#endif

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
		if (n_desc > 1)
			qh->desc_list[0].status |= HOST_DMA_A;
	}
}

static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE)
		len = MAX_DMA_DESC_SIZE - chan->max_packet + 1;

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;
		chan->xfer_len -= len;
	}
}
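
/*
 * Worked example of the IN rounding above: with max_packet = 512 and
 * chan->xfer_len = 1000, num_packets = (1000 + 511) / 512 = 2, so the
 * descriptor is programmed for len = 1024 bytes. Since 1024 > 1000, the
 * final branch zeroes chan->xfer_len: the transfer ends on this descriptor
 * with a short packet, and the unused byte count comes back through the
 * NBYTES field on completion.
 */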

static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(); then
	 * if the SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD. For a
	 * non-SG request there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = qtd->urb->dma +
					qtd->urb->actual_length;
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma,
				 chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
			HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For Control and Bulk endpoints, initializes the descriptor list and starts
 * the transfer. For Interrupt and Isochronous endpoints, initializes the
 * descriptor list, then updates the FrameList, marking the appropriate
 * entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max instead of the actual size, since
			 * otherwise ntd would have to be changed while the
			 * channel is enabled, which is not recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from the urb complete callback (a sound driver, for
		 * example). All pending URBs are dequeued there, so no need
		 * for further processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}
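
/*
 * For an IN descriptor the controller writes the count of bytes it did not
 * transfer back into the ISOC NBYTES field, hence the
 * actual_length = qh->n_bytes[idx] - remain calculation above: e.g. a
 * descriptor programmed for 1024 bytes that completes with remain = 24
 * reports an actual_length of 1000.
 */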

static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to stop
		 * queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;
		do {
			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (rc == DWC2_CMPL_STOP)
				goto stop_scan;
			if (rc == DWC2_CMPL_DONE)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done))
				break;
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates the
 * URB's status and calls the completion routine for the URB if it's done.
 * Called from interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until the end
 * of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}