drivers/dma/qcom/hidma_ll.c
/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE			16	/* each EVRE is 16 bytes */

#define HIDMA_TRCA_CTRLSTS_REG		0x000
#define HIDMA_TRCA_RING_LOW_REG		0x008
#define HIDMA_TRCA_RING_HIGH_REG	0x00C
#define HIDMA_TRCA_RING_LEN_REG		0x010
#define HIDMA_TRCA_DOORBELL_REG		0x400

#define HIDMA_EVCA_CTRLSTS_REG		0x000
#define HIDMA_EVCA_INTCTRL_REG		0x004
#define HIDMA_EVCA_RING_LOW_REG		0x008
#define HIDMA_EVCA_RING_HIGH_REG	0x00C
#define HIDMA_EVCA_RING_LEN_REG		0x010
#define HIDMA_EVCA_WRITE_PTR_REG	0x020
#define HIDMA_EVCA_DOORBELL_REG		0x400

#define HIDMA_EVCA_IRQ_STAT_REG		0x100
#define HIDMA_EVCA_IRQ_CLR_REG		0x108
#define HIDMA_EVCA_IRQ_EN_REG		0x110

#define HIDMA_EVRE_CFG_IDX		0

#define HIDMA_EVRE_ERRINFO_BIT_POS	24
#define HIDMA_EVRE_CODE_BIT_POS		28

#define HIDMA_EVRE_ERRINFO_MASK		GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK		GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS		0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS		0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS		1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)	| \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {							\
	iter += size;					\
	if (iter >= ring_size)				\
		iter -= ring_size;			\
} while (0)

#define HIDMA_CH_STATE(val) \
	(((val) >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK \
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)	| \
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)		| \
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
	HIDMA_CH_STOPPED = 4,
};

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};

static int hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

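/*
 * Return a previously allocated TRE to the free pool. The TRE number is
 * validated and the TRE must have been allocated via hidma_ll_request.
 */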
void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}

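/*
 * Allocate a TRE from the pool and initialize it for a memcpy transfer on
 * this channel. One pool entry is always kept free so the TRE ring can
 * never fill up completely. Returns 0 on success and stores the TRE index
 * in *tre_ch; returns -ENOMEM when no free TRE is available.
 */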
int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{
	unsigned int i;
	struct hidma_tre *tre;
	u32 *tre_local;

	if (!tre_ch || !lldev)
		return -EINVAL;

	/* need to have at least one empty spot in the queue */
	for (i = 0; i < lldev->nr_tres - 1; i++) {
		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
			break;
	}

	if (i == (lldev->nr_tres - 1))
		return -ENOMEM;

	tre = &lldev->trepool[i];
	tre->dma_sig = sig;
	tre->dev_name = dev_name;
	tre->callback = callback;
	tre->data = data;
	tre->idx = i;
	tre->status = 0;
	tre->queued = 0;
	tre->err_code = 0;
	tre->err_info = 0;
	tre->lldev = lldev;
	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
	tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
	*tre_ch = i;
	if (callback)
		callback(data);
	return 0;
}

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}

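/*
 * Mark the TRE at the given ring offset as completed: remove it from the
 * pending list, record the error info/code reported for it and hand the
 * TRE off to the completion tasklet.
 */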
static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
				u8 err_info, u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;

	spin_lock_irqsave(&lldev->lock, flags);
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	lldev->pending_tre_count--;
	if (lldev->pending_tre_count < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		lldev->pending_tre_count = 0;
	}

	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;
	tre->queued = 0;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}

/*
 * Called to handle the interrupt for the channel.
 * Returns the number of EVREs consumed on this run, or 0 if there was
 * nothing to consume or no pending TREs/EVREs were found.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 tre_ring_size = lldev->tre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 tre_iterator, evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	tre_iterator = lldev->tre_processed_off;
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that hardware told us.
	 */
	while (evre_iterator != evre_write_off) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, tre_iterator, err_info,
					 err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
					 tre_ring_size);
		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Read the new event descriptor written by the HW.
		 * As we are processing the delivered events, other events
		 * get queued to the SW for processing.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		u32 tre_read_off = (lldev->tre_processed_off +
				    HIDMA_TRE_SIZE * num_completed);

		evre_read_off = evre_read_off % evre_ring_size;
		tre_read_off = tre_read_off % tre_ring_size;

		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed tre offset */
		lldev->tre_processed_off = tre_read_off;
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}

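/*
 * Abort all pending TREs with the given error info/code, e.g. after a
 * channel error, so that every queued client callback still runs.
 */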
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	u32 tre_iterator;
	u32 tre_ring_size = lldev->tre_ring_size;
	int num_completed = 0;
	u32 tre_read_off;

	tre_iterator = lldev->tre_processed_off;
	while (lldev->pending_tre_count) {
		if (hidma_post_completed(lldev, tre_iterator, err_info,
					 err_code))
			break;
		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
					 tre_ring_size);
		num_completed++;
	}
	tre_read_off = (lldev->tre_processed_off +
			HIDMA_TRE_SIZE * num_completed);

	tre_read_off = tre_read_off % tre_ring_size;

	/* record the last processed tre offset */
	lldev->tre_processed_off = tre_read_off;
}

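/*
 * Issue a reset command to both the transfer and the event channel and
 * wait for each of them to report the disabled state.
 */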
static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Poll the channel state every 1 ms, for up to 10 ms, to give the
	 * DMA logic time to quiesce after the reset.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Poll the channel state every 1 ms, for up to 10 ms, to give the
	 * DMA logic time to quiesce after the reset.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;
	return 0;
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. The EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use the last processed offset to figure out which TRE is
 * associated with which EVRE. If two TREs are consumed by HW, the EVREs
 * are in order in the event ring.
 *
 * This handler makes a single pass over the EVREs per invocation. Other
 * EVREs may be delivered while we are working; the loop re-reads the
 * write pointer and keeps consuming incoming EVREs before returning.
 *
 * For unprocessed EVREs, hardware will trigger another interrupt until
 * all the interrupt bits are cleared.
 *
 * Hardware guarantees that by the time the interrupt is observed, all
 * data transactions in flight have been delivered to their respective
 * places and are visible to the CPU.
 *
 * On demand paging for IOMMU is only supported for PCIe via PRI
 * (Page Request Interface), not for HIDMA. All other hardware instances
 * including HIDMA work on pinned DMA addresses.
 *
 * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
 * IOMMU latency will be built into the data movement time. By the time
 * the interrupt happens, IOMMU lookups + data movement have already taken
 * place.
 *
 * While the first read in a typical PCI endpoint ISR traditionally flushes
 * all outstanding requests to the destination, that concept does not apply
 * to this hardware.
 */
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	/*
	 * Fine tuned for this HW...
	 *
	 * This ISR has been designed for this particular hardware. Relaxed
	 * read and write accessors are used for performance reasons due to
	 * interrupt delivery guarantees. Do not copy this code blindly and
	 * expect that to work.
	 */
	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		if (cause & HIDMA_ERR_INT_MASK) {
			dev_err(lldev->dev, "error 0x%x, disabling...\n",
				cause);

			/* Clear out pending interrupts */
			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

			/* No further submissions. */
			hidma_ll_disable(lldev);

			/* Driver completes the txn and notifies the client. */
			hidma_cleanup_pending_tre(lldev, 0xFF,
						  HIDMA_EVRE_STATUS_ERROR);
			goto out;
		}

		/*
		 * Try to consume as many EVREs as possible.
		 */
		hidma_handle_tre_completion(lldev);

		/* We consumed TREs or there are pending TREs or EVREs. */
		writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

out:
	return IRQ_HANDLED;
}

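/*
 * Enable the event channel first and then the transfer channel, polling
 * each one until the hardware reports an enabled state.
 */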
int hidma_ll_enable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	return 0;
}

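/*
 * Ring the transfer channel doorbell so the hardware starts consuming
 * the queued TREs.
 */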
void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}

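/*
 * Refresh the cached channel states from the hardware and report whether
 * both channels are currently enabled.
 */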
bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* the device counts as enabled only when both channels are enabled */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}

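/*
 * Copy a previously prepared TRE into the transfer ring, add it to the
 * pending list and advance the ring write offset. The caller still has to
 * ring the doorbell via hidma_ll_start for the hardware to see it.
 */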
void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	tre->err_code = 0;
	tre->err_info = 0;
	tre->queued = 1;
	lldev->pending_tre_count++;
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
	    % lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}

/*
 * Note that even though we stop this channel, a transaction already in
 * flight will still complete and invoke its callback. This call only
 * prevents further requests from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);

	/* already suspended by this OS */
	if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
	    (lldev->evch_state == HIDMA_CH_SUSPENDED))
		return 0;

	/* already stopped by the manager */
	if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
	    (lldev->evch_state == HIDMA_CH_STOPPED))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Poll every 1 ms, for up to 10 ms, for the channel to confirm
	 * the suspend request.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Poll every 1 ms, for up to 10 ms, for the channel to confirm
	 * the suspend request.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;
	return 0;
}

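/*
 * Fill in the source/destination addresses and the length of an allocated
 * TRE. The addresses are split into low/high 32-bit halves as the hardware
 * descriptor layout expects.
 */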
void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	lldev->pending_tre_count = 0;
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* support IRQ only for now */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	val &= ~0xF;
	val |= 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return hidma_ll_enable(lldev);
}

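/*
 * Allocate and initialize the low level device context: the TRE pool, the
 * pending list, the DMA coherent TRE and EVRE rings (aligned to their
 * respective descriptor sizes) and the completion handoff FIFO, then bring
 * the hardware up via hidma_ll_setup.
 */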
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* reserve one extra TRE so the ring can never fill up completely */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	spin_lock_init(&lldev->lock);
	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return lldev;
}

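/*
 * Tear down the low level device: kill the completion tasklet, reset the
 * hardware and mask/clear all interrupts. Most of the memory is devm/dmam
 * managed and is released along with the device.
 */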
int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc = 0;
	u32 val;

	if (!lldev)
		return -ENODEV;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	lldev->pending_tre_count = 0;
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return rc;
}

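/* Translate the error code recorded for a TRE into a dmaengine status. */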
enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;
	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}