/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE 16	/* each EVRE is 16 bytes */

#define HIDMA_TRCA_CTRLSTS_REG 0x000
#define HIDMA_TRCA_RING_LOW_REG 0x008
#define HIDMA_TRCA_RING_HIGH_REG 0x00C
#define HIDMA_TRCA_RING_LEN_REG 0x010
#define HIDMA_TRCA_DOORBELL_REG 0x400

#define HIDMA_EVCA_CTRLSTS_REG 0x000
#define HIDMA_EVCA_INTCTRL_REG 0x004
#define HIDMA_EVCA_RING_LOW_REG 0x008
#define HIDMA_EVCA_RING_HIGH_REG 0x00C
#define HIDMA_EVCA_RING_LEN_REG 0x010
#define HIDMA_EVCA_WRITE_PTR_REG 0x020
#define HIDMA_EVCA_DOORBELL_REG 0x400

#define HIDMA_EVCA_IRQ_STAT_REG 0x100
#define HIDMA_EVCA_IRQ_CLR_REG 0x108
#define HIDMA_EVCA_IRQ_EN_REG 0x110

#define HIDMA_EVRE_CFG_IDX 0

#define HIDMA_EVRE_ERRINFO_BIT_POS 24
#define HIDMA_EVRE_CODE_BIT_POS 28

#define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS 0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {							\
	iter += size;					\
	if (iter >= ring_size)				\
		iter -= ring_size;			\
} while (0)
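
/*
 * Worked example of the iterator arithmetic above (illustrative only):
 * with a ring of 4 TREs, ring_size is 4 * HIDMA_TRE_SIZE. An iterator
 * sitting on the last entry, at byte offset 3 * HIDMA_TRE_SIZE, advances
 * to 4 * HIDMA_TRE_SIZE and wraps back to offset 0, so the ring is
 * walked with a compare-and-subtract instead of a modulo on every step.
 */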

#define HIDMA_CH_STATE(val)	\
	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK				\
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) |	\
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) |	\
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) |		\
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) |	\
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
	HIDMA_CH_STOPPED = 4,
};

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};

static int hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}

int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{
	unsigned int i;
	struct hidma_tre *tre;
	u32 *tre_local;

	if (!tre_ch || !lldev)
		return -EINVAL;

	/* need to have at least one empty spot in the queue */
	for (i = 0; i < lldev->nr_tres - 1; i++) {
		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
			break;
	}

	if (i == (lldev->nr_tres - 1))
		return -ENOMEM;

	tre = &lldev->trepool[i];
	tre->dma_sig = sig;
	tre->dev_name = dev_name;
	tre->callback = callback;
	tre->data = data;
	tre->idx = i;
	tre->status = 0;
	tre->queued = 0;
	tre->err_code = 0;
	tre->err_info = 0;
	tre->lldev = lldev;
	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
	tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
	*tre_ch = i;
	if (callback)
		callback(data);
	return 0;
}
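
/*
 * Note on hidma_ll_request(): it only reserves a software descriptor and
 * pre-builds the TRE config word (transfer type in the low byte, channel
 * index in bits 15:8, IEOB in bit 16, as set above). The addresses and
 * length are filled in later by hidma_ll_set_transfer_params(), and
 * nothing touches the hardware until hidma_ll_queue_request() and
 * hidma_ll_start() are called.
 */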

/*
 * Multiple TREs may be queued and waiting in the pending queue. This
 * tasklet drains the handoff FIFO and invokes the user callback for each
 * completed TRE.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}

static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
				u8 err_info, u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;

	spin_lock_irqsave(&lldev->lock, flags);
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	lldev->pending_tre_count--;
	if (lldev->pending_tre_count < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		lldev->pending_tre_count = 0;
	}

	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;
	tre->queued = 0;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}
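
/*
 * Illustration of the offset/index mapping used above: tre_iterator is a
 * byte offset into the TRE ring, so dividing by HIDMA_TRE_SIZE yields the
 * slot number used to index pending_tre_list. The third TRE in the ring,
 * for example, lives at byte offset 2 * HIDMA_TRE_SIZE and maps to slot 2.
 */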

/*
 * Called to handle the interrupt for the channel.
 * Returns the number of EVRE/TRE pairs consumed on this run, or 0 if
 * there was nothing to consume or no pending TREs/EVREs were found.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 tre_ring_size = lldev->tre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 tre_iterator, evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	tre_iterator = lldev->tre_processed_off;
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that hardware told us.
	 */
	while (evre_iterator != evre_write_off) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, tre_iterator, err_info,
					 err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
					 tre_ring_size);
		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Read the new event descriptor written by the HW.
		 * As we are processing the delivered events, other events
		 * get queued to the SW for processing.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		u32 tre_read_off = (lldev->tre_processed_off +
				    HIDMA_TRE_SIZE * num_completed);

		evre_read_off = evre_read_off % evre_ring_size;
		tre_read_off = tre_read_off % tre_ring_size;

		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed tre offset */
		lldev->tre_processed_off = tre_read_off;
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}
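
/*
 * Note on the doorbell write above: the EVCA doorbell is written with the
 * byte offset of the last processed EVRE. This reports the software read
 * position back to the hardware, which (by the usual ring convention)
 * lets it reuse the consumed event ring entries.
 */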

void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	u32 tre_iterator;
	u32 tre_ring_size = lldev->tre_ring_size;
	int num_completed = 0;
	u32 tre_read_off;

	tre_iterator = lldev->tre_processed_off;
	while (lldev->pending_tre_count) {
		if (hidma_post_completed(lldev, tre_iterator, err_info,
					 err_code))
			break;
		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
					 tre_ring_size);
		num_completed++;
	}
	tre_read_off = (lldev->tre_processed_off +
			HIDMA_TRE_SIZE * num_completed);

	tre_read_off = tre_read_off % tre_ring_size;

	/* record the last processed tre offset */
	lldev->tre_processed_off = tre_read_off;
}

static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Wait for the transfer channel to quiesce after the reset
	 * request: poll the state every 1ms, for up to 10ms total.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Wait for the event channel to quiesce after the reset request:
	 * poll the state every 1ms, for up to 10ms total.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;
	return 0;
}
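
/*
 * Sketch of the CTRLSTS layout as used throughout this file: a channel
 * command is written into bits 23:16 (HIDMA_CH_CONTROL_MASK shifted left
 * by 16) and the current channel state is read back from bits 15:8 via
 * HIDMA_CH_STATE(). A reset request therefore looks like:
 *
 *	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
 *	val |= HIDMA_CH_RESET << 16;
 *	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
 */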

/*
 * Abort all transactions and perform a reset.
 */
static void hidma_ll_abort(unsigned long arg)
{
	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
	u8 err_code = HIDMA_EVRE_STATUS_ERROR;
	u8 err_info = 0xFF;
	int rc;

	hidma_cleanup_pending_tre(lldev, err_info, err_code);

	/* reset the channel for recovery */
	rc = hidma_ll_setup(lldev);
	if (rc) {
		dev_err(lldev->dev, "channel reinitialize failed after error\n");
		return;
	}
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. The EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use the last processed offset to figure out which TRE is
 * associated with which EVRE. If two TREs are consumed by HW, the EVREs
 * are in order in the event ring.
 *
 * This handler makes one pass to consume EVREs. Other EVREs may be
 * delivered while we are working; it will try to consume those incoming
 * EVREs one more time and then return.
 *
 * For unprocessed EVREs, hardware will keep triggering interrupts until
 * all the interrupt bits are cleared.
 *
 * Hardware guarantees that by the time the interrupt is observed, all
 * data transactions in flight have been delivered to their respective
 * places and are visible to the CPU.
 *
 * On-demand paging for the IOMMU is only supported for PCIe via PRI
 * (Page Request Interface), not for HIDMA. All other hardware instances,
 * including HIDMA, work on pinned DMA addresses.
 *
 * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
 * IOMMU latency is built into the data movement time. By the time the
 * interrupt happens, IOMMU lookups plus data movement have already taken
 * place.
 *
 * While the first read in a typical PCI endpoint ISR traditionally
 * flushes all outstanding requests to the destination, that concept does
 * not apply to this HW.
 */
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	/*
	 * Fine tuned for this HW...
	 *
	 * This ISR has been designed for this particular hardware. Relaxed
	 * read and write accessors are used for performance reasons due to
	 * interrupt delivery guarantees. Do not copy this code blindly and
	 * expect that to work.
	 */
	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		if (cause & HIDMA_ERR_INT_MASK) {
			dev_err(lldev->dev, "error 0x%x, resetting...\n",
				cause);

			/* Clear out pending interrupts */
			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

			tasklet_schedule(&lldev->rst_task);
			goto out;
		}

		/*
		 * Try to consume as many EVREs as possible.
		 */
		hidma_handle_tre_completion(lldev);

		/* We consumed TREs or there are pending TREs or EVREs. */
		writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

out:
	return IRQ_HANDLED;
}

int hidma_ll_enable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	return 0;
}

void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}
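
/*
 * A minimal submission sketch using the functions above (hedged; the
 * real caller is the hidma dmaengine glue, and the names src, dest, len
 * and done_cb here are placeholders):
 *
 *	u32 tre_ch;
 *
 *	if (!hidma_ll_request(lldev, 0, "sketch", done_cb, cb_data,
 *			      &tre_ch)) {
 *		hidma_ll_set_transfer_params(lldev, tre_ch, src, dest,
 *					     len, 0);
 *		hidma_ll_queue_request(lldev, tre_ch);
 *		hidma_ll_start(lldev);
 *	}
 */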

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* both channels have to be enabled for the device to be usable */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}

void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	tre->err_code = 0;
	tre->err_info = 0;
	tre->queued = 1;
	lldev->pending_tre_count++;
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
	    % lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}

/*
 * Note that even though we stop this channel, a transaction already in
 * flight will still complete and invoke its callback. This request only
 * prevents further submissions from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);

	/* already suspended by this OS */
	if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
	    (lldev->evch_state == HIDMA_CH_SUSPENDED))
		return 0;

	/* already stopped by the manager */
	if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
	    (lldev->evch_state == HIDMA_CH_STOPPED))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Wait for the transfer channel suspend to take effect:
	 * poll the state every 1ms, for up to 10ms total.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Wait for the event channel suspend to take effect:
	 * poll the state every 1ms, for up to 10ms total.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;
	return 0;
}

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}
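
/*
 * The resulting TRE is a small array of 32-bit words; combined with the
 * config word built in hidma_ll_request(), the layout indexed via the
 * hidma.h constants is:
 *
 *	HIDMA_TRE_CFG_IDX	- type, channel index, IEOB
 *	HIDMA_TRE_LEN_IDX	- transfer length
 *	HIDMA_TRE_SRC_LOW_IDX	- lower 32 bits of the source address
 *	HIDMA_TRE_SRC_HI_IDX	- upper 32 bits of the source address
 *	HIDMA_TRE_DEST_LOW_IDX	- lower 32 bits of the destination address
 *	HIDMA_TRE_DEST_HI_IDX	- upper 32 bits of the destination address
 */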

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	lldev->pending_tre_count = 0;
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* support IRQ only for now */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	val &= ~0xF;
	val |= 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return hidma_ll_enable(lldev);
}

struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* need an extra TRE slot so the ring never fills up completely */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	spin_lock_init(&lldev->lock);
	tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return lldev;
}
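
/*
 * A minimal bring-up sketch (hedged; the actual probe path lives in the
 * hidma dmaengine driver, and the "qcom-hidma" IRQ name here is just a
 * placeholder):
 *
 *	struct hidma_lldev *lldev;
 *
 *	lldev = hidma_ll_init(dev, nr_tres, trca_base, evca_base, chidx);
 *	if (!lldev)
 *		return -ENOMEM;
 *	rc = devm_request_irq(dev, chirq, hidma_ll_inthandler, 0,
 *			      "qcom-hidma", lldev);
 */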

int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc = 0;
	u32 val;

	if (!lldev)
		return -ENODEV;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	tasklet_kill(&lldev->rst_task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	lldev->pending_tre_count = 0;
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return rc;
}

enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;
	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}