Commit | Line | Data |
---|---|---|
3bfb1d20 | 1 | /* |
b801479b | 2 | * Core driver for the Synopsys DesignWare DMA Controller |
3bfb1d20 | 3 | * |
3bfb1d20 | 4 | * Copyright (C) 2007-2008 Atmel Corporation |
aecb7b64 | 5 | * Copyright (C) 2010-2011 ST Microelectronics |
9cade1a4 | 6 | * Copyright (C) 2013 Intel Corporation |
3bfb1d20 | 7 | * |
3bfb1d20 | 8 | * This program is free software; you can redistribute it and/or modify |
3bfb1d20 | 9 | * it under the terms of the GNU General Public License version 2 as |
3bfb1d20 | 10 | * published by the Free Software Foundation. |
3bfb1d20 | 11 | */ |
b801479b | 12 | |
327e6970 | 13 | #include <linux/bitops.h> |
3bfb1d20 HS |
14 | #include <linux/delay.h> |
15 | #include <linux/dmaengine.h> | |
16 | #include <linux/dma-mapping.h> | |
f8122a82 | 17 | #include <linux/dmapool.h> |
7331205a | 18 | #include <linux/err.h> |
3bfb1d20 HS |
19 | #include <linux/init.h> |
20 | #include <linux/interrupt.h> | |
21 | #include <linux/io.h> | |
22 | #include <linux/mm.h> | |
23 | #include <linux/module.h> | |
3bfb1d20 | 24 | #include <linux/slab.h> |
bb32baf7 | 25 | #include <linux/pm_runtime.h> |
3bfb1d20 | 26 | |
61a76496 | 27 | #include "../dmaengine.h" |
9cade1a4 | 28 | #include "internal.h" |
3bfb1d20 HS |
29 | |
30 | /* | |
31 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", | |
32 | * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all | |
33 | * of which use ARM any more). See the "Databook" from Synopsys for | |
34 | * information beyond what licensees probably provide. | |
35 | * | |
dd5720b3 | 36 | * The driver has been tested with the Atmel AT32AP7000, which does not |
dd5720b3 | 37 | * support descriptor writeback. |
3bfb1d20 | 38 | */ |
39 | ||
327e6970 | 40 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
327e6970 | 41 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ |
327e6970 | 42 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ |
495aea4b | 43 | bool _is_slave = is_slave_direction(_dwc->direction); \ |
495aea4b | 44 | u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ |
327e6970 | 45 | DW_DMA_MSIZE_16; \ |
495aea4b | 46 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ |
327e6970 | 47 | DW_DMA_MSIZE_16; \ |
bb3450ad | 48 | u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \ |
bb3450ad | 49 | _dwc->p_master : _dwc->m_master; \ |
bb3450ad | 50 | u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \ |
bb3450ad | 51 | _dwc->p_master : _dwc->m_master; \ |
f301c062 | 52 | \ |
327e6970 | 53 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
327e6970 | 54 | | DWC_CTLL_SRC_MSIZE(_smsize) \ |
f301c062 | 55 | | DWC_CTLL_LLP_D_EN \ |
f301c062 | 56 | | DWC_CTLL_LLP_S_EN \ |
bb3450ad | 57 | | DWC_CTLL_DMS(_dms) \ |
bb3450ad | 58 | | DWC_CTLL_SMS(_sms)); \ |
f301c062 | 59 | }) |
3bfb1d20 | 60 | |
029a40e9 AS |
61 | /* The set of bus widths supported by the DMA controller */ |
62 | #define DW_DMA_BUSWIDTHS \ | |
63 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | |
64 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | |
65 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | |
66 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | |
67 | ||
3bfb1d20 | 68 | /*----------------------------------------------------------------------*/ |
3bfb1d20 | 69 | |
41d5e59c DW |
70 | static struct device *chan2dev(struct dma_chan *chan) |
71 | { | |
72 | return &chan->dev->device; | |
73 | } | |
41d5e59c | 74 | |
3bfb1d20 HS |
75 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
76 | { | |
e63a47a3 | 77 | return to_dw_desc(dwc->active_list.next); |
3bfb1d20 HS |
78 | } |
79 | ||
ab703f81 | 80 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) |
3bfb1d20 | 81 | { |
ab703f81 CL |
82 | struct dw_desc *desc = txd_to_dw_desc(tx); |
83 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | |
84 | dma_cookie_t cookie; | |
85 | unsigned long flags; | |
3bfb1d20 | 86 | |
69cea5a0 | 87 | spin_lock_irqsave(&dwc->lock, flags); |
ab703f81 CL |
88 | cookie = dma_cookie_assign(tx); |
89 | ||
90 | /* | |
91 | * REVISIT: We should attempt to chain as many descriptors as | |
92 | * possible, perhaps even appending to those already submitted | |
93 | * for DMA. But this is hard to do in a race-free manner. | |
94 | */ | |
95 | ||
96 | list_add_tail(&desc->desc_node, &dwc->queue); | |
69cea5a0 | 97 | spin_unlock_irqrestore(&dwc->lock, flags); |
ab703f81 CL |
98 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", |
99 | __func__, desc->txd.cookie); | |
3bfb1d20 | 100 | |
ab703f81 CL |
101 | return cookie; |
102 | } | |
3bfb1d20 | 103 | |
ab703f81 CL |
104 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
105 | { | |
106 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
107 | struct dw_desc *desc; | |
108 | dma_addr_t phys; | |
109 | ||
110 | desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys); | |
111 | if (!desc) | |
112 | return NULL; | |
113 | ||
114 | dwc->descs_allocated++; | |
115 | INIT_LIST_HEAD(&desc->tx_list); | |
116 | dma_async_tx_descriptor_init(&desc->txd, &dwc->chan); | |
117 | desc->txd.tx_submit = dwc_tx_submit; | |
118 | desc->txd.flags = DMA_CTRL_ACK; | |
119 | desc->txd.phys = phys; | |
120 | return desc; | |
3bfb1d20 HS |
121 | } |
122 | ||
3bfb1d20 HS |
123 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) |
124 | { | |
ab703f81 CL |
125 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
126 | struct dw_desc *child, *_next; | |
69cea5a0 | 127 | |
ab703f81 CL |
128 | if (unlikely(!desc)) |
129 | return; | |
3bfb1d20 | 130 | |
ab703f81 CL |
131 | list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) { |
132 | list_del(&child->desc_node); | |
133 | dma_pool_free(dw->desc_pool, child, child->txd.phys); | |
134 | dwc->descs_allocated--; | |
3bfb1d20 | 135 | } |
ab703f81 CL |
136 | |
137 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); | |
138 | dwc->descs_allocated--; | |
3bfb1d20 HS |
139 | } |
140 | ||
61e183f8 VK |
141 | static void dwc_initialize(struct dw_dma_chan *dwc) |
142 | { | |
143 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
61e183f8 VK |
144 | u32 cfghi = DWC_CFGH_FIFO_MODE; |
145 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | |
146 | ||
423f9cbf | 147 | if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) |
61e183f8 VK |
148 | return; |
149 | ||
3fe6409c AS |
150 | cfghi |= DWC_CFGH_DST_PER(dwc->dst_id); |
151 | cfghi |= DWC_CFGH_SRC_PER(dwc->src_id); | |
61e183f8 VK |
152 | |
153 | channel_writel(dwc, CFG_LO, cfglo); | |
154 | channel_writel(dwc, CFG_HI, cfghi); | |
155 | ||
156 | /* Enable interrupts */ | |
157 | channel_set_bit(dw, MASK.XFER, dwc->mask); | |
61e183f8 VK |
158 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
159 | ||
423f9cbf | 160 | set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); |
61e183f8 VK |
161 | } |
162 | ||
3bfb1d20 HS |
163 | /*----------------------------------------------------------------------*/ |
164 | ||
f52b36d2 | 165 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) |
1d455437 AS |
166 | { |
167 | dev_err(chan2dev(&dwc->chan), | |
168 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | |
169 | channel_readl(dwc, SAR), | |
170 | channel_readl(dwc, DAR), | |
171 | channel_readl(dwc, LLP), | |
172 | channel_readl(dwc, CTL_HI), | |
173 | channel_readl(dwc, CTL_LO)); | |
174 | } | |
175 | ||
3f936207 AS |
176 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) |
177 | { | |
178 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
179 | while (dma_readl(dw, CH_EN) & dwc->mask) | |
180 | cpu_relax(); | |
181 | } | |
182 | ||
1d455437 AS |
183 | /*----------------------------------------------------------------------*/ |
184 | ||
fed2574b AS |
185 | /* Perform single block transfer */ |
186 | static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | |
187 | struct dw_desc *desc) | |
188 | { | |
189 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
190 | u32 ctllo; | |
191 | ||
1d566f11 AS |
192 | /* |
193 | * Software emulation of LLP mode relies on interrupts to continue | |
194 | * multi block transfer. | |
195 | */ | |
df1f3a23 | 196 | ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN; |
fed2574b | 197 | |
df1f3a23 MR |
198 | channel_writel(dwc, SAR, lli_read(desc, sar)); |
199 | channel_writel(dwc, DAR, lli_read(desc, dar)); | |
fed2574b | 200 | channel_writel(dwc, CTL_LO, ctllo); |
df1f3a23 | 201 | channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi)); |
fed2574b | 202 | channel_set_bit(dw, CH_EN, dwc->mask); |
f5c6a7df AS |
203 | |
204 | /* Move pointer to next descriptor */ | |
205 | dwc->tx_node_active = dwc->tx_node_active->next; | |
fed2574b AS |
206 | } |
207 | ||
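How the soft-LLP emulation above plays out (a reading of the surrounding code, not new behaviour): dwc_dostart() programs only the first block; each completed block raises an XFER interrupt, and dwc_scan_descriptors() then calls dwc_do_single_block() for the next entry on tx_list until tx_node_active wraps back to the list head.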
3bfb1d20 HS |
208 | /* Called with dwc->lock held and bh disabled */ |
209 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |
210 | { | |
211 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
2a0fae02 | 212 | u8 lms = DWC_LLP_LMS(dwc->m_master); |
fed2574b | 213 | unsigned long was_soft_llp; |
3bfb1d20 HS |
214 | |
215 | /* ASSERT: channel is idle */ | |
216 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
41d5e59c | 217 | dev_err(chan2dev(&dwc->chan), |
550da64b JN |
218 | "%s: BUG: Attempted to start non-idle channel\n", |
219 | __func__); | |
1d455437 | 220 | dwc_dump_chan_regs(dwc); |
3bfb1d20 HS |
221 | |
222 | /* The tasklet will hopefully advance the queue... */ | |
223 | return; | |
224 | } | |
225 | ||
fed2574b AS |
226 | if (dwc->nollp) { |
227 | was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, | |
228 | &dwc->flags); | |
229 | if (was_soft_llp) { | |
230 | dev_err(chan2dev(&dwc->chan), | |
fc61f6b4 | 231 | "BUG: Attempted to start new LLP transfer inside ongoing one\n"); |
fed2574b AS |
232 | return; |
233 | } | |
234 | ||
235 | dwc_initialize(dwc); | |
236 | ||
b68fd097 | 237 | first->residue = first->total_len; |
f5c6a7df | 238 | dwc->tx_node_active = &first->tx_list; |
fed2574b | 239 | |
fdf475fa | 240 | /* Submit first block */ |
fed2574b AS |
241 | dwc_do_single_block(dwc, first); |
242 | ||
243 | return; | |
244 | } | |
245 | ||
61e183f8 VK |
246 | dwc_initialize(dwc); |
247 | ||
2a0fae02 MR |
248 | channel_writel(dwc, LLP, first->txd.phys | lms); |
249 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | |
3bfb1d20 HS |
250 | channel_writel(dwc, CTL_HI, 0); |
251 | channel_set_bit(dw, CH_EN, dwc->mask); | |
252 | } | |
253 | ||
e7637c6c AS |
254 | static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) |
255 | { | |
cba15617 AS |
256 | struct dw_desc *desc; |
257 | ||
e7637c6c AS |
258 | if (list_empty(&dwc->queue)) |
259 | return; | |
260 | ||
261 | list_move(dwc->queue.next, &dwc->active_list); | |
cba15617 AS |
262 | desc = dwc_first_active(dwc); |
263 | dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); | |
264 | dwc_dostart(dwc, desc); | |
e7637c6c AS |
265 | } |
266 | ||
3bfb1d20 HS |
267 | /*----------------------------------------------------------------------*/ |
268 | ||
269 | static void | |
5fedefb8 VK |
270 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, |
271 | bool callback_required) | |
3bfb1d20 | 272 | { |
3bfb1d20 | 273 | struct dma_async_tx_descriptor *txd = &desc->txd; |
e518076e | 274 | struct dw_desc *child; |
69cea5a0 | 275 | unsigned long flags; |
577ef925 | 276 | struct dmaengine_desc_callback cb; |
3bfb1d20 | 277 | |
41d5e59c | 278 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
3bfb1d20 | 279 | |
69cea5a0 | 280 | spin_lock_irqsave(&dwc->lock, flags); |
f7fbce07 | 281 | dma_cookie_complete(txd); |
577ef925 DJ |
282 | if (callback_required) |
283 | dmaengine_desc_get_callback(txd, &cb); | |
284 | else | |
285 | memset(&cb, 0, sizeof(cb)); | |
3bfb1d20 | 286 | |
e518076e VK |
287 | /* async_tx_ack */ |
288 | list_for_each_entry(child, &desc->tx_list, desc_node) | |
289 | async_tx_ack(&child->txd); | |
290 | async_tx_ack(&desc->txd); | |
ab703f81 | 291 | dwc_desc_put(dwc, desc); |
69cea5a0 VK |
292 | spin_unlock_irqrestore(&dwc->lock, flags); |
293 | ||
577ef925 | 294 | dmaengine_desc_callback_invoke(&cb, NULL); |
3bfb1d20 HS |
295 | } |
296 | ||
297 | static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
298 | { | |
299 | struct dw_desc *desc, *_desc; | |
300 | LIST_HEAD(list); | |
69cea5a0 | 301 | unsigned long flags; |
3bfb1d20 | 302 | |
69cea5a0 | 303 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 304 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
41d5e59c | 305 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 HS |
306 | "BUG: XFER bit set, but channel not idle!\n"); |
307 | ||
308 | /* Try to continue after resetting the channel... */ | |
3f936207 | 309 | dwc_chan_disable(dw, dwc); |
3bfb1d20 HS |
310 | } |
311 | ||
312 | /* | |
313 | * Submit queued descriptors ASAP, i.e. before we go through | |
314 | * the completed ones. | |
315 | */ | |
3bfb1d20 | 316 | list_splice_init(&dwc->active_list, &list); |
e7637c6c | 317 | dwc_dostart_first_queued(dwc); |
3bfb1d20 | 318 | |
69cea5a0 VK |
319 | spin_unlock_irqrestore(&dwc->lock, flags); |
320 | ||
3bfb1d20 | 321 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
5fedefb8 | 322 | dwc_descriptor_complete(dwc, desc, true); |
3bfb1d20 HS |
323 | } |
324 | ||
4702d524 AS |
325 | /* Returns how many bytes were already received from source */ |
326 | static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) | |
327 | { | |
328 | u32 ctlhi = channel_readl(dwc, CTL_HI); | |
329 | u32 ctllo = channel_readl(dwc, CTL_LO); | |
330 | ||
331 | return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); | |
332 | } | |
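The return statement above packs two register fields into one expression. The sketch below restates the same arithmetic with named intermediates; the 12-bit BLOCK_TS width and the CTL_LO[6:4] source-width field are assumptions taken from how this file uses DWC_CTLH_BLOCK_TS_MASK and the `ctllo >> 4 & 7` shift, not a separate implementation.

```c
/* Illustrative only: the same math as dwc_get_sent(), spelled out. */
static inline u32 dwc_sent_bytes_example(u32 ctlhi, u32 ctllo)
{
	u32 block_ts = ctlhi & 0xfff;		/* items transferred so far (BLOCK_TS) */
	u32 src_tr_width = (ctllo >> 4) & 7;	/* log2 of the source item size in bytes */

	/* e.g. block_ts = 64 with 32-bit items (src_tr_width = 2) -> 256 bytes */
	return block_ts * (1U << src_tr_width);
}
```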
333 | ||
3bfb1d20 HS |
334 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
335 | { | |
336 | dma_addr_t llp; | |
337 | struct dw_desc *desc, *_desc; | |
338 | struct dw_desc *child; | |
339 | u32 status_xfer; | |
69cea5a0 | 340 | unsigned long flags; |
3bfb1d20 | 341 | |
69cea5a0 | 342 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
343 | llp = channel_readl(dwc, LLP); |
344 | status_xfer = dma_readl(dw, RAW.XFER); | |
345 | ||
346 | if (status_xfer & dwc->mask) { | |
347 | /* Everything we've submitted is done */ | |
348 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
77bcc497 AS |
349 | |
350 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | |
fdf475fa AS |
351 | struct list_head *head, *active = dwc->tx_node_active; |
352 | ||
353 | /* | |
354 | * We are inside first active descriptor. | |
355 | * Otherwise something is really wrong. | |
356 | */ | |
357 | desc = dwc_first_active(dwc); | |
358 | ||
359 | head = &desc->tx_list; | |
360 | if (active != head) { | |
b68fd097 AS |
361 | /* Update residue to reflect last sent descriptor */ |
362 | if (active == head->next) | |
363 | desc->residue -= desc->len; | |
364 | else | |
365 | desc->residue -= to_dw_desc(active->prev)->len; | |
4702d524 | 366 | |
fdf475fa | 367 | child = to_dw_desc(active); |
77bcc497 AS |
368 | |
369 | /* Submit next block */ | |
fdf475fa | 370 | dwc_do_single_block(dwc, child); |
77bcc497 | 371 | |
fdf475fa | 372 | spin_unlock_irqrestore(&dwc->lock, flags); |
77bcc497 AS |
373 | return; |
374 | } | |
fdf475fa | 375 | |
77bcc497 AS |
376 | /* We are done here */ |
377 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | |
378 | } | |
4702d524 | 379 | |
69cea5a0 VK |
380 | spin_unlock_irqrestore(&dwc->lock, flags); |
381 | ||
3bfb1d20 HS |
382 | dwc_complete_all(dw, dwc); |
383 | return; | |
384 | } | |
385 | ||
69cea5a0 VK |
386 | if (list_empty(&dwc->active_list)) { |
387 | spin_unlock_irqrestore(&dwc->lock, flags); | |
087809fc | 388 | return; |
69cea5a0 | 389 | } |
087809fc | 390 | |
77bcc497 AS |
391 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { |
392 | dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); | |
69cea5a0 | 393 | spin_unlock_irqrestore(&dwc->lock, flags); |
087809fc | 394 | return; |
69cea5a0 | 395 | } |
087809fc | 396 | |
5a87f0e6 | 397 | dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); |
3bfb1d20 HS |
398 | |
399 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | |
75c61225 | 400 | /* Initial residue value */ |
b68fd097 | 401 | desc->residue = desc->total_len; |
4702d524 | 402 | |
75c61225 | 403 | /* Check first descriptor's addr */
2a0fae02 | 404 | if (desc->txd.phys == DWC_LLP_LOC(llp)) { |
69cea5a0 | 405 | spin_unlock_irqrestore(&dwc->lock, flags); |
84adccfb | 406 | return; |
69cea5a0 | 407 | } |
84adccfb | 408 | |
75c61225 | 409 | /* Check first descriptor's llp */
df1f3a23 | 410 | if (lli_read(desc, llp) == llp) { |
3bfb1d20 | 411 | /* This one is currently in progress */ |
b68fd097 | 412 | desc->residue -= dwc_get_sent(dwc); |
69cea5a0 | 413 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 414 | return; |
69cea5a0 | 415 | } |
3bfb1d20 | 416 | |
b68fd097 | 417 | desc->residue -= desc->len; |
4702d524 | 418 | list_for_each_entry(child, &desc->tx_list, desc_node) { |
df1f3a23 | 419 | if (lli_read(child, llp) == llp) { |
3bfb1d20 | 420 | /* Currently in progress */ |
b68fd097 | 421 | desc->residue -= dwc_get_sent(dwc); |
69cea5a0 | 422 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 423 | return; |
69cea5a0 | 424 | } |
b68fd097 | 425 | desc->residue -= child->len; |
4702d524 | 426 | } |
3bfb1d20 HS |
427 | |
428 | /* | |
429 | * No descriptors so far seem to be in progress, i.e. | |
430 | * this one must be done. | |
431 | */ | |
69cea5a0 | 432 | spin_unlock_irqrestore(&dwc->lock, flags); |
5fedefb8 | 433 | dwc_descriptor_complete(dwc, desc, true); |
69cea5a0 | 434 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
435 | } |
436 | ||
41d5e59c | 437 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 HS |
438 | "BUG: All descriptors done, but channel not idle!\n"); |
439 | ||
440 | /* Try to continue after resetting the channel... */ | |
3f936207 | 441 | dwc_chan_disable(dw, dwc); |
3bfb1d20 | 442 | |
e7637c6c | 443 | dwc_dostart_first_queued(dwc); |
69cea5a0 | 444 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
445 | } |
446 | ||
df1f3a23 | 447 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc) |
3bfb1d20 | 448 | { |
21d43f49 | 449 | dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
df1f3a23 MR |
450 | lli_read(desc, sar), |
451 | lli_read(desc, dar), | |
452 | lli_read(desc, llp), | |
453 | lli_read(desc, ctlhi), | |
454 | lli_read(desc, ctllo)); | |
3bfb1d20 HS |
455 | } |
456 | ||
457 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
458 | { | |
459 | struct dw_desc *bad_desc; | |
460 | struct dw_desc *child; | |
69cea5a0 | 461 | unsigned long flags; |
3bfb1d20 HS |
462 | |
463 | dwc_scan_descriptors(dw, dwc); | |
464 | ||
69cea5a0 VK |
465 | spin_lock_irqsave(&dwc->lock, flags); |
466 | ||
3bfb1d20 HS |
467 | /* |
468 | * The descriptor currently at the head of the active list is | |
469 | * borked. Since we don't have any way to report errors, we'll | |
470 | * just have to scream loudly and try to carry on. | |
471 | */ | |
472 | bad_desc = dwc_first_active(dwc); | |
473 | list_del_init(&bad_desc->desc_node); | |
f336e42f | 474 | list_move(dwc->queue.next, dwc->active_list.prev); |
3bfb1d20 HS |
475 | |
476 | /* Clear the error flag and try to restart the controller */ | |
477 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | |
478 | if (!list_empty(&dwc->active_list)) | |
479 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
480 | ||
481 | /* | |
ba84bd71 | 482 | * WARN may seem harsh, but since this only happens |
3bfb1d20 HS |
483 | * when someone submits a bad physical address in a |
484 | * descriptor, we should consider ourselves lucky that the | |
485 | * controller flagged an error instead of scribbling over | |
486 | * random memory locations. | |
487 | */ | |
ba84bd71 AS |
488 | dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" |
489 | " cookie: %d\n", bad_desc->txd.cookie); | |
df1f3a23 | 490 | dwc_dump_lli(dwc, bad_desc); |
e0bd0f8c | 491 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
df1f3a23 | 492 | dwc_dump_lli(dwc, child); |
3bfb1d20 | 493 | |
69cea5a0 VK |
494 | spin_unlock_irqrestore(&dwc->lock, flags); |
495 | ||
3bfb1d20 | 496 | /* Pretend the descriptor completed successfully */ |
5fedefb8 | 497 | dwc_descriptor_complete(dwc, bad_desc, true); |
3bfb1d20 HS |
498 | } |
499 | ||
d9de4519 HCE |
500 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
501 | ||
8004cbb4 | 502 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) |
d9de4519 HCE |
503 | { |
504 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
505 | return channel_readl(dwc, SAR); | |
506 | } | |
507 | EXPORT_SYMBOL(dw_dma_get_src_addr); | |
508 | ||
8004cbb4 | 509 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) |
d9de4519 HCE |
510 | { |
511 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
512 | return channel_readl(dwc, DAR); | |
513 | } | |
514 | EXPORT_SYMBOL(dw_dma_get_dst_addr); | |
515 | ||
75c61225 | 516 | /* Called with dwc->lock held and all DMAC interrupts disabled */ |
d9de4519 | 517 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
2895b2ca | 518 | u32 status_block, u32 status_err, u32 status_xfer) |
d9de4519 | 519 | { |
69cea5a0 VK |
520 | unsigned long flags; |
521 | ||
2895b2ca | 522 | if (status_block & dwc->mask) { |
d9de4519 HCE |
523 | void (*callback)(void *param); |
524 | void *callback_param; | |
525 | ||
526 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", | |
527 | channel_readl(dwc, LLP)); | |
2895b2ca | 528 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
d9de4519 HCE |
529 | |
530 | callback = dwc->cdesc->period_callback; | |
531 | callback_param = dwc->cdesc->period_callback_param; | |
69cea5a0 VK |
532 | |
533 | if (callback) | |
d9de4519 | 534 | callback(callback_param); |
d9de4519 HCE |
535 | } |
536 | ||
537 | /* | |
538 | * Error and transfer complete are highly unlikely, and will most | |
539 | * likely be due to a configuration error by the user. | |
540 | */ | |
541 | if (unlikely(status_err & dwc->mask) || | |
542 | unlikely(status_xfer & dwc->mask)) { | |
7794e5b9 | 543 | unsigned int i; |
d9de4519 | 544 | |
fc61f6b4 AS |
545 | dev_err(chan2dev(&dwc->chan), |
546 | "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", | |
547 | status_xfer ? "xfer" : "error"); | |
69cea5a0 VK |
548 | |
549 | spin_lock_irqsave(&dwc->lock, flags); | |
550 | ||
1d455437 | 551 | dwc_dump_chan_regs(dwc); |
d9de4519 | 552 | |
3f936207 | 553 | dwc_chan_disable(dw, dwc); |
d9de4519 | 554 | |
75c61225 | 555 | /* Make sure DMA does not restart by loading a new list */ |
d9de4519 HCE |
556 | channel_writel(dwc, LLP, 0); |
557 | channel_writel(dwc, CTL_LO, 0); | |
558 | channel_writel(dwc, CTL_HI, 0); | |
559 | ||
2895b2ca | 560 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
d9de4519 HCE |
561 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
562 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
563 | ||
564 | for (i = 0; i < dwc->cdesc->periods; i++) | |
df1f3a23 | 565 | dwc_dump_lli(dwc, dwc->cdesc->desc[i]); |
69cea5a0 VK |
566 | |
567 | spin_unlock_irqrestore(&dwc->lock, flags); | |
d9de4519 | 568 | } |
ee1cdcda AS |
569 | |
570 | /* Re-enable interrupts */ | |
571 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | |
d9de4519 HCE |
572 | } |
573 | ||
574 | /* ------------------------------------------------------------------------- */ | |
575 | ||
3bfb1d20 HS |
576 | static void dw_dma_tasklet(unsigned long data) |
577 | { | |
578 | struct dw_dma *dw = (struct dw_dma *)data; | |
579 | struct dw_dma_chan *dwc; | |
2895b2ca | 580 | u32 status_block; |
3bfb1d20 HS |
581 | u32 status_xfer; |
582 | u32 status_err; | |
7794e5b9 | 583 | unsigned int i; |
3bfb1d20 | 584 | |
2895b2ca | 585 | status_block = dma_readl(dw, RAW.BLOCK); |
7fe7b2f4 | 586 | status_xfer = dma_readl(dw, RAW.XFER); |
3bfb1d20 HS |
587 | status_err = dma_readl(dw, RAW.ERROR); |
588 | ||
2e4c364e | 589 | dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); |
3bfb1d20 HS |
590 | |
591 | for (i = 0; i < dw->dma.chancnt; i++) { | |
592 | dwc = &dw->chan[i]; | |
d9de4519 | 593 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
2895b2ca MR |
594 | dwc_handle_cyclic(dw, dwc, status_block, status_err, |
595 | status_xfer); | |
d9de4519 | 596 | else if (status_err & (1 << i)) |
3bfb1d20 | 597 | dwc_handle_error(dw, dwc); |
77bcc497 | 598 | else if (status_xfer & (1 << i)) |
3bfb1d20 | 599 | dwc_scan_descriptors(dw, dwc); |
3bfb1d20 HS |
600 | } |
601 | ||
ee1cdcda | 602 | /* Re-enable interrupts */ |
3bfb1d20 | 603 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); |
3bfb1d20 HS |
604 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
605 | } | |
606 | ||
607 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |
608 | { | |
609 | struct dw_dma *dw = dev_id; | |
02a21b79 | 610 | u32 status; |
3bfb1d20 | 611 | |
02a21b79 AS |
612 | /* Check if we got an interrupt while the DMAC is not in use */
613 | if (!dw->in_use) | |
614 | return IRQ_NONE; | |
615 | ||
616 | status = dma_readl(dw, STATUS_INT); | |
3783cef8 AS |
617 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); |
618 | ||
619 | /* Check if we have any interrupt from the DMAC */ | |
02a21b79 | 620 | if (!status) |
3783cef8 | 621 | return IRQ_NONE; |
3bfb1d20 HS |
622 | |
623 | /* | |
624 | * Just disable the interrupts. We'll turn them back on in the | |
625 | * softirq handler. | |
626 | */ | |
627 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
2895b2ca | 628 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
629 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
630 | ||
631 | status = dma_readl(dw, STATUS_INT); | |
632 | if (status) { | |
633 | dev_err(dw->dma.dev, | |
634 | "BUG: Unexpected interrupts pending: 0x%x\n", | |
635 | status); | |
636 | ||
637 | /* Try to recover */ | |
638 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | |
2895b2ca | 639 | channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); |
3bfb1d20 HS |
640 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
641 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | |
642 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | |
643 | } | |
644 | ||
645 | tasklet_schedule(&dw->tasklet); | |
646 | ||
647 | return IRQ_HANDLED; | |
648 | } | |
649 | ||
650 | /*----------------------------------------------------------------------*/ | |
651 | ||
3bfb1d20 HS |
652 | static struct dma_async_tx_descriptor * |
653 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |
654 | size_t len, unsigned long flags) | |
655 | { | |
656 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
f776076b | 657 | struct dw_dma *dw = to_dw_dma(chan->device); |
3bfb1d20 HS |
658 | struct dw_desc *desc; |
659 | struct dw_desc *first; | |
660 | struct dw_desc *prev; | |
661 | size_t xfer_count; | |
662 | size_t offset; | |
2e65060e | 663 | u8 m_master = dwc->m_master; |
3bfb1d20 HS |
664 | unsigned int src_width; |
665 | unsigned int dst_width; | |
161c3d04 | 666 | unsigned int data_width = dw->pdata->data_width[m_master]; |
3bfb1d20 | 667 | u32 ctllo; |
2e65060e | 668 | u8 lms = DWC_LLP_LMS(m_master); |
3bfb1d20 | 669 | |
2f45d613 | 670 | dev_vdbg(chan2dev(chan), |
5a87f0e6 AS |
671 | "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, |
672 | &dest, &src, len, flags); | |
3bfb1d20 HS |
673 | |
674 | if (unlikely(!len)) { | |
2e4c364e | 675 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
3bfb1d20 HS |
676 | return NULL; |
677 | } | |
678 | ||
0fdb567f AS |
679 | dwc->direction = DMA_MEM_TO_MEM; |
680 | ||
2e65060e | 681 | src_width = dst_width = __ffs(data_width | src | dest | len); |
3bfb1d20 | 682 | |
327e6970 | 683 | ctllo = DWC_DEFAULT_CTLLO(chan) |
3bfb1d20 HS |
684 | | DWC_CTLL_DST_WIDTH(dst_width) |
685 | | DWC_CTLL_SRC_WIDTH(src_width) | |
686 | | DWC_CTLL_DST_INC | |
687 | | DWC_CTLL_SRC_INC | |
688 | | DWC_CTLL_FC_M2M; | |
689 | prev = first = NULL; | |
690 | ||
691 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | |
692 | xfer_count = min_t(size_t, (len - offset) >> src_width, | |
4a63a8b3 | 693 | dwc->block_size); |
3bfb1d20 HS |
694 | |
695 | desc = dwc_desc_get(dwc); | |
696 | if (!desc) | |
697 | goto err_desc_get; | |
698 | ||
df1f3a23 MR |
699 | lli_write(desc, sar, src + offset); |
700 | lli_write(desc, dar, dest + offset); | |
701 | lli_write(desc, ctllo, ctllo); | |
702 | lli_write(desc, ctlhi, xfer_count); | |
176dcec5 | 703 | desc->len = xfer_count << src_width; |
3bfb1d20 HS |
704 | |
705 | if (!first) { | |
706 | first = desc; | |
707 | } else { | |
2a0fae02 | 708 | lli_write(prev, llp, desc->txd.phys | lms); |
df1f3a23 | 709 | list_add_tail(&desc->desc_node, &first->tx_list); |
3bfb1d20 HS |
710 | } |
711 | prev = desc; | |
712 | } | |
713 | ||
3bfb1d20 HS |
714 | if (flags & DMA_PREP_INTERRUPT) |
715 | /* Trigger interrupt after last block */ | |
df1f3a23 | 716 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
3bfb1d20 HS |
717 | |
718 | prev->lli.llp = 0; | |
a3e55799 | 719 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
3bfb1d20 | 720 | first->txd.flags = flags; |
30d38a32 | 721 | first->total_len = len; |
3bfb1d20 HS |
722 | |
723 | return &first->txd; | |
724 | ||
725 | err_desc_get: | |
726 | dwc_desc_put(dwc, first); | |
727 | return NULL; | |
728 | } | |
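For context, a client never calls the prep routine above directly; it goes through the generic dmaengine API. A minimal sketch, assuming the standard dmaengine_prep_dma_memcpy() wrapper and already DMA-mapped addresses (error handling trimmed):

```c
#include <linux/dmaengine.h>

/* Sketch: one memcpy transaction through the generic client API. */
static int example_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);		/* ends up in dwc_tx_submit() */
	dma_async_issue_pending(chan);		/* ends up in dwc_issue_pending() */

	return dma_submit_error(cookie) ? -EIO : 0;
}
```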
729 | ||
730 | static struct dma_async_tx_descriptor * | |
731 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
db8196df | 732 | unsigned int sg_len, enum dma_transfer_direction direction, |
185ecb5f | 733 | unsigned long flags, void *context) |
3bfb1d20 HS |
734 | { |
735 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
f776076b | 736 | struct dw_dma *dw = to_dw_dma(chan->device); |
327e6970 | 737 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
3bfb1d20 HS |
738 | struct dw_desc *prev; |
739 | struct dw_desc *first; | |
740 | u32 ctllo; | |
2e65060e AS |
741 | u8 m_master = dwc->m_master; |
742 | u8 lms = DWC_LLP_LMS(m_master); | |
3bfb1d20 HS |
743 | dma_addr_t reg; |
744 | unsigned int reg_width; | |
745 | unsigned int mem_width; | |
161c3d04 | 746 | unsigned int data_width = dw->pdata->data_width[m_master]; |
3bfb1d20 HS |
747 | unsigned int i; |
748 | struct scatterlist *sg; | |
749 | size_t total_len = 0; | |
750 | ||
2e4c364e | 751 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 | 752 | |
495aea4b | 753 | if (unlikely(!is_slave_direction(direction) || !sg_len)) |
3bfb1d20 HS |
754 | return NULL; |
755 | ||
0fdb567f AS |
756 | dwc->direction = direction; |
757 | ||
3bfb1d20 HS |
758 | prev = first = NULL; |
759 | ||
3bfb1d20 | 760 | switch (direction) { |
db8196df | 761 | case DMA_MEM_TO_DEV: |
39416677 | 762 | reg_width = __ffs(sconfig->dst_addr_width); |
327e6970 VK |
763 | reg = sconfig->dst_addr; |
764 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
765 | | DWC_CTLL_DST_WIDTH(reg_width) |
766 | | DWC_CTLL_DST_FIX | |
327e6970 VK |
767 | | DWC_CTLL_SRC_INC); |
768 | ||
769 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
770 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | |
771 | ||
3bfb1d20 HS |
772 | for_each_sg(sgl, sg, sg_len, i) { |
773 | struct dw_desc *desc; | |
69dc14b5 | 774 | u32 len, dlen, mem; |
3bfb1d20 | 775 | |
cbb796cc | 776 | mem = sg_dma_address(sg); |
69dc14b5 | 777 | len = sg_dma_len(sg); |
6bc711f6 | 778 | |
2e65060e | 779 | mem_width = __ffs(data_width | mem | len); |
3bfb1d20 | 780 | |
69dc14b5 | 781 | slave_sg_todev_fill_desc: |
3bfb1d20 | 782 | desc = dwc_desc_get(dwc); |
b2607227 | 783 | if (!desc) |
3bfb1d20 | 784 | goto err_desc_get; |
3bfb1d20 | 785 | |
df1f3a23 MR |
786 | lli_write(desc, sar, mem); |
787 | lli_write(desc, dar, reg); | |
788 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); | |
4a63a8b3 AS |
789 | if ((len >> mem_width) > dwc->block_size) { |
790 | dlen = dwc->block_size << mem_width; | |
69dc14b5 VK |
791 | mem += dlen; |
792 | len -= dlen; | |
793 | } else { | |
794 | dlen = len; | |
795 | len = 0; | |
796 | } | |
797 | ||
df1f3a23 | 798 | lli_write(desc, ctlhi, dlen >> mem_width); |
176dcec5 | 799 | desc->len = dlen; |
3bfb1d20 HS |
800 | |
801 | if (!first) { | |
802 | first = desc; | |
803 | } else { | |
2a0fae02 | 804 | lli_write(prev, llp, desc->txd.phys | lms); |
df1f3a23 | 805 | list_add_tail(&desc->desc_node, &first->tx_list); |
3bfb1d20 HS |
806 | } |
807 | prev = desc; | |
69dc14b5 VK |
808 | total_len += dlen; |
809 | ||
810 | if (len) | |
811 | goto slave_sg_todev_fill_desc; | |
3bfb1d20 HS |
812 | } |
813 | break; | |
db8196df | 814 | case DMA_DEV_TO_MEM: |
39416677 | 815 | reg_width = __ffs(sconfig->src_addr_width); |
327e6970 VK |
816 | reg = sconfig->src_addr; |
817 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
818 | | DWC_CTLL_SRC_WIDTH(reg_width) |
819 | | DWC_CTLL_DST_INC | |
327e6970 VK |
820 | | DWC_CTLL_SRC_FIX); |
821 | ||
822 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
823 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | |
3bfb1d20 | 824 | |
3bfb1d20 HS |
825 | for_each_sg(sgl, sg, sg_len, i) { |
826 | struct dw_desc *desc; | |
69dc14b5 | 827 | u32 len, dlen, mem; |
3bfb1d20 | 828 | |
cbb796cc | 829 | mem = sg_dma_address(sg); |
3bfb1d20 | 830 | len = sg_dma_len(sg); |
6bc711f6 | 831 | |
2e65060e | 832 | mem_width = __ffs(data_width | mem | len); |
3bfb1d20 | 833 | |
69dc14b5 VK |
834 | slave_sg_fromdev_fill_desc: |
835 | desc = dwc_desc_get(dwc); | |
b2607227 | 836 | if (!desc) |
69dc14b5 | 837 | goto err_desc_get; |
69dc14b5 | 838 | |
df1f3a23 MR |
839 | lli_write(desc, sar, reg); |
840 | lli_write(desc, dar, mem); | |
841 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); | |
4a63a8b3 AS |
842 | if ((len >> reg_width) > dwc->block_size) { |
843 | dlen = dwc->block_size << reg_width; | |
69dc14b5 VK |
844 | mem += dlen; |
845 | len -= dlen; | |
846 | } else { | |
847 | dlen = len; | |
848 | len = 0; | |
849 | } | |
df1f3a23 | 850 | lli_write(desc, ctlhi, dlen >> reg_width); |
176dcec5 | 851 | desc->len = dlen; |
3bfb1d20 HS |
852 | |
853 | if (!first) { | |
854 | first = desc; | |
855 | } else { | |
2a0fae02 | 856 | lli_write(prev, llp, desc->txd.phys | lms); |
df1f3a23 | 857 | list_add_tail(&desc->desc_node, &first->tx_list); |
3bfb1d20 HS |
858 | } |
859 | prev = desc; | |
69dc14b5 VK |
860 | total_len += dlen; |
861 | ||
862 | if (len) | |
863 | goto slave_sg_fromdev_fill_desc; | |
3bfb1d20 HS |
864 | } |
865 | break; | |
866 | default: | |
867 | return NULL; | |
868 | } | |
869 | ||
870 | if (flags & DMA_PREP_INTERRUPT) | |
871 | /* Trigger interrupt after last block */ | |
df1f3a23 | 872 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
3bfb1d20 HS |
873 | |
874 | prev->lli.llp = 0; | |
a3e55799 | 875 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
30d38a32 | 876 | first->total_len = total_len; |
3bfb1d20 HS |
877 | |
878 | return &first->txd; | |
879 | ||
880 | err_desc_get: | |
b2607227 JN |
881 | dev_err(chan2dev(chan), |
882 | "not enough descriptors available. Direction %d\n", direction); | |
3bfb1d20 HS |
883 | dwc_desc_put(dwc, first); |
884 | return NULL; | |
885 | } | |
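As with memcpy, peripheral drivers reach dwc_prep_slave_sg() through the generic client API. A hedged sketch, assuming the channel has already been configured with dmaengine_slave_config() and the scatterlist has been DMA-mapped:

```c
/* Sketch: queue one mem-to-device scatter-gather transfer. */
static int example_queue_slave_sg(struct dma_chan *chan,
				  struct scatterlist *sgl, unsigned int nents)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
```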
886 | ||
4d130de2 AS |
887 | bool dw_dma_filter(struct dma_chan *chan, void *param) |
888 | { | |
889 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
890 | struct dw_dma_slave *dws = param; | |
891 | ||
3fe6409c | 892 | if (dws->dma_dev != chan->device->dev) |
4d130de2 AS |
893 | return false; |
894 | ||
895 | /* We have to copy data since dws can be temporary storage */ | |
896 | ||
897 | dwc->src_id = dws->src_id; | |
898 | dwc->dst_id = dws->dst_id; | |
899 | ||
c422025c AS |
900 | dwc->m_master = dws->m_master; |
901 | dwc->p_master = dws->p_master; | |
4d130de2 AS |
902 | |
903 | return true; | |
904 | } | |
905 | EXPORT_SYMBOL_GPL(dw_dma_filter); | |
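dw_dma_filter() is intended to be handed to dma_request_channel() together with a struct dw_dma_slave describing the handshake interface. A minimal sketch; the request-line and master numbers below are placeholders for some peripheral, not values from this file:

```c
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>

static struct dma_chan *example_request_dw_channel(struct device *dmac_dev)
{
	struct dw_dma_slave slave = {
		.dma_dev  = dmac_dev,	/* must match chan->device->dev */
		.src_id   = 0,		/* placeholder handshake interfaces */
		.dst_id   = 1,
		.m_master = 0,
		.p_master = 1,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, dw_dma_filter, &slave);
}
```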
906 | ||
327e6970 | 907 | /* |
327e6970 | 908 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: |
327e6970 | 909 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. |
327e6970 | 910 | * |
327e6970 | 911 | * NOTE: burst size 2 is not supported by the controller. |
327e6970 | 912 | * |
327e6970 | 913 | * This is done by taking the position of the most significant set bit: fls(n) - 2. |
327e6970 | 914 | */ |
915 | static inline void convert_burst(u32 *maxburst) | |
916 | { | |
917 | if (*maxburst > 1) | |
918 | *maxburst = fls(*maxburst) - 2; | |
919 | else | |
920 | *maxburst = 0; | |
921 | } | |
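A quick check of the helper above: fls(4) = 3, fls(8) = 4 and fls(16) = 5, so maxburst values of 4, 8 and 16 become 1, 2 and 3, while 0 and 1 take the else branch and become 0 — matching the table in the comment.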
922 | ||
a4b0d348 | 923 | static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
327e6970 VK |
924 | { |
925 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
926 | ||
495aea4b AS |
927 | /* Check if chan will be configured for slave transfers */ |
928 | if (!is_slave_direction(sconfig->direction)) | |
327e6970 VK |
929 | return -EINVAL; |
930 | ||
931 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | |
0fdb567f | 932 | dwc->direction = sconfig->direction; |
327e6970 VK |
933 | |
934 | convert_burst(&dwc->dma_sconfig.src_maxburst); | |
935 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | |
936 | ||
937 | return 0; | |
938 | } | |
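The fields consumed here are the standard struct dma_slave_config ones. A hedged example of what a client might pass in before the scatter-gather sketch shown earlier; the FIFO address, width and burst below are placeholders:

```c
/* Sketch: slave configuration as consumed by dwc_config(). */
static int example_configure(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,			/* peripheral FIFO (placeholder) */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,		/* convert_burst() turns this into 2 */
		.device_fc	= false,	/* DMAC acts as the flow controller */
	};

	return dmaengine_slave_config(chan, &cfg);
}
```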
939 | ||
a4b0d348 | 940 | static int dwc_pause(struct dma_chan *chan) |
21fe3c52 | 941 | { |
a4b0d348 MR |
942 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
943 | unsigned long flags; | |
944 | unsigned int count = 20; /* timeout iterations */ | |
945 | u32 cfglo; | |
946 | ||
947 | spin_lock_irqsave(&dwc->lock, flags); | |
21fe3c52 | 948 | |
a4b0d348 | 949 | cfglo = channel_readl(dwc, CFG_LO); |
21fe3c52 | 950 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); |
123b69ab AS |
951 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
952 | udelay(2); | |
21fe3c52 | 953 | |
5e09f98e | 954 | set_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
a4b0d348 MR |
955 | |
956 | spin_unlock_irqrestore(&dwc->lock, flags); | |
957 | ||
958 | return 0; | |
21fe3c52 AS |
959 | } |
960 | ||
961 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | |
962 | { | |
963 | u32 cfglo = channel_readl(dwc, CFG_LO); | |
964 | ||
965 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | |
966 | ||
5e09f98e | 967 | clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
21fe3c52 AS |
968 | } |
969 | ||
a4b0d348 | 970 | static int dwc_resume(struct dma_chan *chan) |
3bfb1d20 HS |
971 | { |
972 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
69cea5a0 | 973 | unsigned long flags; |
3bfb1d20 | 974 | |
a4b0d348 | 975 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 976 | |
5e09f98e AS |
977 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) |
978 | dwc_chan_resume(dwc); | |
3bfb1d20 | 979 | |
a4b0d348 | 980 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 981 | |
a4b0d348 MR |
982 | return 0; |
983 | } | |
3bfb1d20 | 984 | |
a4b0d348 MR |
985 | static int dwc_terminate_all(struct dma_chan *chan) |
986 | { | |
987 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
988 | struct dw_dma *dw = to_dw_dma(chan->device); | |
989 | struct dw_desc *desc, *_desc; | |
990 | unsigned long flags; | |
991 | LIST_HEAD(list); | |
3bfb1d20 | 992 | |
a4b0d348 | 993 | spin_lock_irqsave(&dwc->lock, flags); |
fed2574b | 994 | |
a4b0d348 | 995 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); |
fed2574b | 996 | |
a4b0d348 | 997 | dwc_chan_disable(dw, dwc); |
a7c57cf7 | 998 | |
a4b0d348 | 999 | dwc_chan_resume(dwc); |
a7c57cf7 | 1000 | |
a4b0d348 MR |
1001 | /* active_list entries will end up before queued entries */ |
1002 | list_splice_init(&dwc->queue, &list); | |
1003 | list_splice_init(&dwc->active_list, &list); | |
a7c57cf7 | 1004 | |
a4b0d348 | 1005 | spin_unlock_irqrestore(&dwc->lock, flags); |
a7c57cf7 | 1006 | |
a4b0d348 MR |
1007 | /* Flush all pending and queued descriptors */ |
1008 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | |
1009 | dwc_descriptor_complete(dwc, desc, false); | |
c3635c78 LW |
1010 | |
1011 | return 0; | |
3bfb1d20 HS |
1012 | } |
1013 | ||
b68fd097 AS |
1014 | static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c) |
1015 | { | |
1016 | struct dw_desc *desc; | |
1017 | ||
1018 | list_for_each_entry(desc, &dwc->active_list, desc_node) | |
1019 | if (desc->txd.cookie == c) | |
1020 | return desc; | |
1021 | ||
1022 | return NULL; | |
1023 | } | |
1024 | ||
1025 | static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie) | |
4702d524 | 1026 | { |
b68fd097 | 1027 | struct dw_desc *desc; |
4702d524 AS |
1028 | unsigned long flags; |
1029 | u32 residue; | |
1030 | ||
1031 | spin_lock_irqsave(&dwc->lock, flags); | |
1032 | ||
b68fd097 AS |
1033 | desc = dwc_find_desc(dwc, cookie); |
1034 | if (desc) { | |
1035 | if (desc == dwc_first_active(dwc)) { | |
1036 | residue = desc->residue; | |
1037 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) | |
1038 | residue -= dwc_get_sent(dwc); | |
1039 | } else { | |
1040 | residue = desc->total_len; | |
1041 | } | |
1042 | } else { | |
1043 | residue = 0; | |
1044 | } | |
4702d524 AS |
1045 | |
1046 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1047 | return residue; | |
1048 | } | |
1049 | ||
3bfb1d20 | 1050 | static enum dma_status |
07934481 LW |
1051 | dwc_tx_status(struct dma_chan *chan, |
1052 | dma_cookie_t cookie, | |
1053 | struct dma_tx_state *txstate) | |
3bfb1d20 HS |
1054 | { |
1055 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
96a2af41 | 1056 | enum dma_status ret; |
3bfb1d20 | 1057 | |
96a2af41 | 1058 | ret = dma_cookie_status(chan, cookie, txstate); |
2c40410b | 1059 | if (ret == DMA_COMPLETE) |
12381dc0 | 1060 | return ret; |
3bfb1d20 | 1061 | |
12381dc0 | 1062 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
3bfb1d20 | 1063 | |
12381dc0 | 1064 | ret = dma_cookie_status(chan, cookie, txstate); |
b68fd097 AS |
1065 | if (ret == DMA_COMPLETE) |
1066 | return ret; | |
1067 | ||
1068 | dma_set_residue(txstate, dwc_get_residue(dwc, cookie)); | |
3bfb1d20 | 1069 | |
5e09f98e | 1070 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS) |
a7c57cf7 | 1071 | return DMA_PAUSED; |
3bfb1d20 HS |
1072 | |
1073 | return ret; | |
1074 | } | |
1075 | ||
1076 | static void dwc_issue_pending(struct dma_chan *chan) | |
1077 | { | |
1078 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
dd8ecfca | 1079 | unsigned long flags; |
3bfb1d20 | 1080 | |
dd8ecfca AS |
1081 | spin_lock_irqsave(&dwc->lock, flags); |
1082 | if (list_empty(&dwc->active_list)) | |
1083 | dwc_dostart_first_queued(dwc); | |
1084 | spin_unlock_irqrestore(&dwc->lock, flags); | |
3bfb1d20 HS |
1085 | } |
1086 | ||
99d9bf4e AS |
1087 | /*----------------------------------------------------------------------*/ |
1088 | ||
1089 | static void dw_dma_off(struct dw_dma *dw) | |
1090 | { | |
7794e5b9 | 1091 | unsigned int i; |
99d9bf4e AS |
1092 | |
1093 | dma_writel(dw, CFG, 0); | |
1094 | ||
1095 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
2895b2ca | 1096 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
99d9bf4e AS |
1097 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1098 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | |
1099 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | |
1100 | ||
1101 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | |
1102 | cpu_relax(); | |
1103 | ||
1104 | for (i = 0; i < dw->dma.chancnt; i++) | |
423f9cbf | 1105 | clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); |
99d9bf4e AS |
1106 | } |
1107 | ||
1108 | static void dw_dma_on(struct dw_dma *dw) | |
1109 | { | |
1110 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | |
1111 | } | |
1112 | ||
aa1e6f1a | 1113 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
3bfb1d20 HS |
1114 | { |
1115 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1116 | struct dw_dma *dw = to_dw_dma(chan->device); | |
3bfb1d20 | 1117 | |
2e4c364e | 1118 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 | 1119 | |
3bfb1d20 HS |
1120 | /* ASSERT: channel is idle */ |
1121 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
41d5e59c | 1122 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
3bfb1d20 HS |
1123 | return -EIO; |
1124 | } | |
1125 | ||
d3ee98cd | 1126 | dma_cookie_init(chan); |
3bfb1d20 | 1127 | |
3bfb1d20 HS |
1128 | /* |
1129 | * NOTE: some controllers may have additional features that we | |
1130 | * need to initialize here, like "scatter-gather" (which | |
1131 | * doesn't mean what you think it means), and status writeback. | |
1132 | */ | |
1133 | ||
3fe6409c AS |
1134 | /* |
1135 | * We need controller-specific data to set up slave transfers. | |
1136 | */ | |
1137 | if (chan->private && !dw_dma_filter(chan, chan->private)) { | |
1138 | dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); | |
1139 | return -EINVAL; | |
1140 | } | |
1141 | ||
99d9bf4e AS |
1142 | /* Enable controller here if needed */ |
1143 | if (!dw->in_use) | |
1144 | dw_dma_on(dw); | |
1145 | dw->in_use |= dwc->mask; | |
1146 | ||
ab703f81 | 1147 | return 0; |
3bfb1d20 HS |
1148 | } |
1149 | ||
1150 | static void dwc_free_chan_resources(struct dma_chan *chan) | |
1151 | { | |
1152 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1153 | struct dw_dma *dw = to_dw_dma(chan->device); | |
69cea5a0 | 1154 | unsigned long flags; |
3bfb1d20 HS |
1155 | LIST_HEAD(list); |
1156 | ||
2e4c364e | 1157 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
3bfb1d20 HS |
1158 | dwc->descs_allocated); |
1159 | ||
1160 | /* ASSERT: channel is idle */ | |
1161 | BUG_ON(!list_empty(&dwc->active_list)); | |
1162 | BUG_ON(!list_empty(&dwc->queue)); | |
1163 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | |
1164 | ||
69cea5a0 | 1165 | spin_lock_irqsave(&dwc->lock, flags); |
3fe6409c AS |
1166 | |
1167 | /* Clear custom channel configuration */ | |
1168 | dwc->src_id = 0; | |
1169 | dwc->dst_id = 0; | |
1170 | ||
c422025c AS |
1171 | dwc->m_master = 0; |
1172 | dwc->p_master = 0; | |
3fe6409c | 1173 | |
423f9cbf | 1174 | clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); |
3bfb1d20 HS |
1175 | |
1176 | /* Disable interrupts */ | |
1177 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | |
2895b2ca | 1178 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); |
3bfb1d20 HS |
1179 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
1180 | ||
69cea5a0 | 1181 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 1182 | |
99d9bf4e AS |
1183 | /* Disable the controller in case it was the last user */
1184 | dw->in_use &= ~dwc->mask; | |
1185 | if (!dw->in_use) | |
1186 | dw_dma_off(dw); | |
1187 | ||
2e4c364e | 1188 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
3bfb1d20 HS |
1189 | } |
1190 | ||
d9de4519 HCE |
1191 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1192 | ||
1193 | /** | |
1194 | * dw_dma_cyclic_start - start the cyclic DMA transfer | |
1195 | * @chan: the DMA channel to start | |
1196 | * | |
1197 | * Must be called with soft interrupts disabled. Returns zero on success or | |
1198 | * -errno on failure. | |
1199 | */ | |
1200 | int dw_dma_cyclic_start(struct dma_chan *chan) | |
1201 | { | |
1202 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
ee1cdcda | 1203 | struct dw_dma *dw = to_dw_dma(chan->device); |
69cea5a0 | 1204 | unsigned long flags; |
d9de4519 HCE |
1205 | |
1206 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | |
1207 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | |
1208 | return -ENODEV; | |
1209 | } | |
1210 | ||
69cea5a0 | 1211 | spin_lock_irqsave(&dwc->lock, flags); |
ee1cdcda AS |
1212 | |
1213 | /* Enable interrupts to perform cyclic transfer */ | |
1214 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | |
1215 | ||
df3bb8a0 | 1216 | dwc_dostart(dwc, dwc->cdesc->desc[0]); |
ee1cdcda | 1217 | |
69cea5a0 | 1218 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1219 | |
1220 | return 0; | |
1221 | } | |
1222 | EXPORT_SYMBOL(dw_dma_cyclic_start); | |
1223 | ||
1224 | /** | |
1225 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer | |
1226 | * @chan: the DMA channel to stop | |
1227 | * | |
1228 | * Must be called with soft interrupts disabled. | |
1229 | */ | |
1230 | void dw_dma_cyclic_stop(struct dma_chan *chan) | |
1231 | { | |
1232 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1233 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
69cea5a0 | 1234 | unsigned long flags; |
d9de4519 | 1235 | |
69cea5a0 | 1236 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1237 | |
3f936207 | 1238 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1239 | |
69cea5a0 | 1240 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1241 | } |
1242 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | |
1243 | ||
1244 | /** | |
1245 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer | |
1246 | * @chan: the DMA channel to prepare | |
1247 | * @buf_addr: physical DMA address where the buffer starts | |
1248 | * @buf_len: total number of bytes for the entire buffer | |
1249 | * @period_len: number of bytes for each period | |
1250 | * @direction: transfer direction, to or from device | |
1251 | * | |
1252 | * Must be called before trying to start the transfer. Returns a valid struct | |
1253 | * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. | |
1254 | */ | |
1255 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |
1256 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | |
db8196df | 1257 | enum dma_transfer_direction direction) |
d9de4519 HCE |
1258 | { |
1259 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
327e6970 | 1260 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
d9de4519 HCE |
1261 | struct dw_cyclic_desc *cdesc; |
1262 | struct dw_cyclic_desc *retval = NULL; | |
1263 | struct dw_desc *desc; | |
1264 | struct dw_desc *last = NULL; | |
2a0fae02 | 1265 | u8 lms = DWC_LLP_LMS(dwc->m_master); |
d9de4519 HCE |
1266 | unsigned long was_cyclic; |
1267 | unsigned int reg_width; | |
1268 | unsigned int periods; | |
1269 | unsigned int i; | |
69cea5a0 | 1270 | unsigned long flags; |
d9de4519 | 1271 | |
69cea5a0 | 1272 | spin_lock_irqsave(&dwc->lock, flags); |
fed2574b AS |
1273 | if (dwc->nollp) { |
1274 | spin_unlock_irqrestore(&dwc->lock, flags); | |
1275 | dev_dbg(chan2dev(&dwc->chan), | |
1276 | "channel doesn't support LLP transfers\n"); | |
1277 | return ERR_PTR(-EINVAL); | |
1278 | } | |
1279 | ||
d9de4519 | 1280 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
69cea5a0 | 1281 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1282 | dev_dbg(chan2dev(&dwc->chan), |
1283 | "queue and/or active list are not empty\n"); | |
1284 | return ERR_PTR(-EBUSY); | |
1285 | } | |
1286 | ||
1287 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
69cea5a0 | 1288 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1289 | if (was_cyclic) { |
1290 | dev_dbg(chan2dev(&dwc->chan), | |
1291 | "channel already prepared for cyclic DMA\n"); | |
1292 | return ERR_PTR(-EBUSY); | |
1293 | } | |
1294 | ||
1295 | retval = ERR_PTR(-EINVAL); | |
327e6970 | 1296 | |
f44b92f4 AS |
1297 | if (unlikely(!is_slave_direction(direction))) |
1298 | goto out_err; | |
1299 | ||
0fdb567f AS |
1300 | dwc->direction = direction; |
1301 | ||
327e6970 VK |
1302 | if (direction == DMA_MEM_TO_DEV) |
1303 | reg_width = __ffs(sconfig->dst_addr_width); | |
1304 | else | |
1305 | reg_width = __ffs(sconfig->src_addr_width); | |
1306 | ||
d9de4519 HCE |
1307 | periods = buf_len / period_len; |
1308 | ||
1309 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | |
4a63a8b3 | 1310 | if (period_len > (dwc->block_size << reg_width)) |
d9de4519 HCE |
1311 | goto out_err; |
1312 | if (unlikely(period_len & ((1 << reg_width) - 1))) | |
1313 | goto out_err; | |
1314 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | |
1315 | goto out_err; | |
d9de4519 HCE |
1316 | |
1317 | retval = ERR_PTR(-ENOMEM); | |
1318 | ||
d9de4519 HCE |
1319 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); |
1320 | if (!cdesc) | |
1321 | goto out_err; | |
1322 | ||
1323 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); | |
1324 | if (!cdesc->desc) | |
1325 | goto out_err_alloc; | |
1326 | ||
1327 | for (i = 0; i < periods; i++) { | |
1328 | desc = dwc_desc_get(dwc); | |
1329 | if (!desc) | |
1330 | goto out_err_desc_get; | |
1331 | ||
1332 | switch (direction) { | |
db8196df | 1333 | case DMA_MEM_TO_DEV: |
df1f3a23 MR |
1334 | lli_write(desc, dar, sconfig->dst_addr); |
1335 | lli_write(desc, sar, buf_addr + period_len * i); | |
1336 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) | |
1337 | | DWC_CTLL_DST_WIDTH(reg_width) | |
1338 | | DWC_CTLL_SRC_WIDTH(reg_width) | |
1339 | | DWC_CTLL_DST_FIX | |
1340 | | DWC_CTLL_SRC_INC | |
1341 | | DWC_CTLL_INT_EN)); | |
1342 | ||
1343 | lli_set(desc, ctllo, sconfig->device_fc ? | |
1344 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
1345 | DWC_CTLL_FC(DW_DMA_FC_D_M2P)); | |
327e6970 | 1346 | |
d9de4519 | 1347 | break; |
db8196df | 1348 | case DMA_DEV_TO_MEM: |
df1f3a23 MR |
1349 | lli_write(desc, dar, buf_addr + period_len * i); |
1350 | lli_write(desc, sar, sconfig->src_addr); | |
1351 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) | |
1352 | | DWC_CTLL_SRC_WIDTH(reg_width) | |
1353 | | DWC_CTLL_DST_WIDTH(reg_width) | |
1354 | | DWC_CTLL_DST_INC | |
1355 | | DWC_CTLL_SRC_FIX | |
1356 | | DWC_CTLL_INT_EN)); | |
1357 | ||
1358 | lli_set(desc, ctllo, sconfig->device_fc ? | |
1359 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
1360 | DWC_CTLL_FC(DW_DMA_FC_D_P2M)); | |
327e6970 | 1361 | |
d9de4519 HCE |
1362 | break; |
1363 | default: | |
1364 | break; | |
1365 | } | |
1366 | ||
df1f3a23 | 1367 | lli_write(desc, ctlhi, period_len >> reg_width); |
d9de4519 HCE |
1368 | cdesc->desc[i] = desc; |
1369 | ||
f8122a82 | 1370 | if (last) |
2a0fae02 | 1371 | lli_write(last, llp, desc->txd.phys | lms); |
d9de4519 HCE |
1372 | |
1373 | last = desc; | |
1374 | } | |
1375 | ||
75c61225 | 1376 | /* Let's make a cyclic list */ |
2a0fae02 | 1377 | lli_write(last, llp, cdesc->desc[0]->txd.phys | lms); |
d9de4519 | 1378 | |
5a87f0e6 AS |
1379 | dev_dbg(chan2dev(&dwc->chan), |
1380 | "cyclic prepared buf %pad len %zu period %zu periods %d\n", | |
1381 | &buf_addr, buf_len, period_len, periods); | |
d9de4519 HCE |
1382 | |
1383 | cdesc->periods = periods; | |
1384 | dwc->cdesc = cdesc; | |
1385 | ||
1386 | return cdesc; | |
1387 | ||
1388 | out_err_desc_get: | |
1389 | while (i--) | |
1390 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1391 | out_err_alloc: | |
1392 | kfree(cdesc); | |
1393 | out_err: | |
1394 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1395 | return (struct dw_cyclic_desc *)retval; | |
1396 | } | |
1397 | EXPORT_SYMBOL(dw_dma_cyclic_prep); | |
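The cyclic helpers exported in this block are normally used together. A hedged sketch for a ring-buffer style transfer; the buffer sizes are placeholders, the buffer is assumed to be DMA-mapped already, and the period callback fields are those of the struct dw_cyclic_desc returned above:

```c
/* Sketch: run a mem-to-device ring buffer with the cyclic API. */
static int example_start_ring(struct dma_chan *chan, dma_addr_t buf,
			      void (*cb)(void *), void *cb_arg)
{
	size_t buf_len = 4 * PAGE_SIZE;		/* placeholder: 4 periods */
	size_t period_len = PAGE_SIZE;
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* Invoked from the tasklet at every period (BLOCK) interrupt */
	cdesc->period_callback = cb;
	cdesc->period_callback_param = cb_arg;

	return dw_dma_cyclic_start(chan);
	/* ...later: dw_dma_cyclic_stop(chan); dw_dma_cyclic_free(chan); */
}
```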
1398 | ||
1399 | /** | |
1400 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer | |
1401 | * @chan: the DMA channel to free | |
1402 | */ | |
1403 | void dw_dma_cyclic_free(struct dma_chan *chan) | |
1404 | { | |
1405 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1406 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
1407 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | |
7794e5b9 | 1408 | unsigned int i; |
69cea5a0 | 1409 | unsigned long flags; |
d9de4519 | 1410 | |
2e4c364e | 1411 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
d9de4519 HCE |
1412 | |
1413 | if (!cdesc) | |
1414 | return; | |
1415 | ||
69cea5a0 | 1416 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1417 | |
3f936207 | 1418 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1419 | |
2895b2ca | 1420 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
d9de4519 HCE |
1421 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1422 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
1423 | ||
69cea5a0 | 1424 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1425 | |
1426 | for (i = 0; i < cdesc->periods; i++) | |
1427 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1428 | ||
1429 | kfree(cdesc->desc); | |
1430 | kfree(cdesc); | |
1431 | ||
925a7d04 AS |
1432 | dwc->cdesc = NULL; |
1433 | ||
d9de4519 HCE |
1434 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1435 | } | |
1436 | EXPORT_SYMBOL(dw_dma_cyclic_free); | |
1437 | ||
3bfb1d20 HS |
1438 | /*----------------------------------------------------------------------*/ |
1439 | ||
3a14c66d | 1440 | int dw_dma_probe(struct dw_dma_chip *chip) |
a9ddb575 | 1441 | { |
3a14c66d | 1442 | struct dw_dma_platform_data *pdata; |
3bfb1d20 | 1443 | struct dw_dma *dw; |
30cb2639 | 1444 | bool autocfg = false; |
482c67ea | 1445 | unsigned int dw_params; |
7794e5b9 | 1446 | unsigned int i; |
3bfb1d20 | 1447 | int err; |
3bfb1d20 | 1448 | |
000871ce AS |
1449 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); |
1450 | if (!dw) | |
1451 | return -ENOMEM; | |
1452 | ||
161c3d04 AS |
1453 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); |
1454 | if (!dw->pdata) | |
1455 | return -ENOMEM; | |
1456 | ||
000871ce AS |
1457 | dw->regs = chip->regs; |
1458 | chip->dw = dw; | |
1459 | ||
bb32baf7 AS |
1460 | pm_runtime_get_sync(chip->dev); |
1461 | ||
3a14c66d | 1462 | if (!chip->pdata) { |
897e40d3 | 1463 | dw_params = dma_readl(dw, DW_PARAMS); |
30cb2639 | 1464 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); |
482c67ea | 1465 | |
30cb2639 AS |
1466 | autocfg = dw_params >> DW_PARAMS_EN & 1; |
1467 | if (!autocfg) { | |
1468 | err = -EINVAL; | |
1469 | goto err_pdata; | |
1470 | } | |
123de543 | 1471 | |
161c3d04 AS |
1472 | /* Reassign the platform data pointer */ |
1473 | pdata = dw->pdata; | |
123de543 | 1474 | |
30cb2639 AS |
1475 | /* Get hardware configuration parameters */ |
1476 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; | |
1477 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | |
1478 | for (i = 0; i < pdata->nr_masters; i++) { | |
1479 | pdata->data_width[i] = | |
2e65060e | 1480 | 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3); |
30cb2639 | 1481 | } |
161c3d04 | 1482 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); |
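/*
 * Worked example of the DW_PARAMS decode above (register values are
 * hypothetical, the field widths follow the masks used in this code):
 * a 3-bit NR_CHAN field of 7 yields 8 channels, a 2-bit NR_MASTER
 * field of 1 yields 2 masters, and a 2-bit DATA_WIDTH field of 2
 * yields 4 << 2 = 16, i.e. a 16-byte (128-bit) master data bus.
 * MAX_BLK_SIZE packs one 4-bit block-size encoding per channel; it is
 * decoded in the per-channel loop further down.
 */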
30cb2639 | 1483 | |
123de543 AS |
1484 | /* Fill platform data with the default values */ |
1485 | pdata->is_private = true; | |
df5c7386 | 1486 | pdata->is_memcpy = true; |
123de543 AS |
1487 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1488 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | |
3a14c66d | 1489 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
8be4f523 AS |
1490 | err = -EINVAL; |
1491 | goto err_pdata; | |
161c3d04 | 1492 | } else { |
3a14c66d | 1493 | memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata)); |
161c3d04 AS |
1494 | |
1495 | /* Reassign the platform data pointer */ | |
1496 | pdata = dw->pdata; | |
8be4f523 | 1497 | } |
123de543 | 1498 | |
30cb2639 | 1499 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), |
000871ce | 1500 | GFP_KERNEL); |
8be4f523 AS |
1501 | if (!dw->chan) { |
1502 | err = -ENOMEM; | |
1503 | goto err_pdata; | |
1504 | } | |
3bfb1d20 | 1505 | |
11f932ec | 1506 | /* Calculate the mask covering all channels before DMA setup */ |
30cb2639 | 1507 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
11f932ec | 1508 | |
75c61225 | 1509 | /* Force dma off, just in case */ |
3bfb1d20 HS |
1510 | dw_dma_off(dw); |
1511 | ||
75c61225 | 1512 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
9cade1a4 | 1513 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, |
f8122a82 AS |
1514 | sizeof(struct dw_desc), 4, 0); |
1515 | if (!dw->desc_pool) { | |
9cade1a4 | 1516 | dev_err(chip->dev, "No memory for descriptors dma pool\n"); |
8be4f523 AS |
1517 | err = -ENOMEM; |
1518 | goto err_pdata; | |
f8122a82 AS |
1519 | } |
1520 | ||
3bfb1d20 HS |
1521 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1522 | ||
97977f75 AS |
1523 | err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, |
1524 | "dw_dmac", dw); | |
1525 | if (err) | |
8be4f523 | 1526 | goto err_pdata; |
97977f75 | 1527 | |
3bfb1d20 | 1528 | INIT_LIST_HEAD(&dw->dma.channels); |
30cb2639 | 1529 | for (i = 0; i < pdata->nr_channels; i++) { |
3bfb1d20 HS |
1530 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1531 | ||
1532 | dwc->chan.device = &dw->dma; | |
d3ee98cd | 1533 | dma_cookie_init(&dwc->chan); |
b0c3130d VK |
1534 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1535 | list_add_tail(&dwc->chan.device_node, | |
1536 | &dw->dma.channels); | |
1537 | else | |
1538 | list_add(&dwc->chan.device_node, &dw->dma.channels); | |
3bfb1d20 | 1539 | |
93317e8e VK |
1540 | /* 7 is highest priority & 0 is lowest. */ |
1541 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | |
30cb2639 | 1542 | dwc->priority = pdata->nr_channels - i - 1; |
93317e8e VK |
1543 | else |
1544 | dwc->priority = i; | |
1545 | ||
3bfb1d20 HS |
1546 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1547 | spin_lock_init(&dwc->lock); | |
1548 | dwc->mask = 1 << i; | |
1549 | ||
1550 | INIT_LIST_HEAD(&dwc->active_list); | |
1551 | INIT_LIST_HEAD(&dwc->queue); | |
3bfb1d20 HS |
1552 | |
1553 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
4a63a8b3 | 1554 | |
0fdb567f | 1555 | dwc->direction = DMA_TRANS_NONE; |
a0982004 | 1556 | |
75c61225 | 1557 | /* Hardware configuration */ |
fed2574b | 1558 | if (autocfg) { |
6bea0f6d | 1559 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; |
897e40d3 AS |
1560 | void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; |
1561 | unsigned int dwc_params = dma_readl_native(addr); | |
fed2574b | 1562 | |
9cade1a4 AS |
1563 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, |
1564 | dwc_params); | |
985a6c7d | 1565 | |
1d566f11 AS |
1566 | /* |
1567 | * Decode the maximum block size for the given channel. The |
4a63a8b3 | 1568 | * stored 4-bit value encodes block sizes from 3 (0x0) |
1d566f11 AS |
1569 | * up to 4095 (0xa). |
1570 | */ | |
4a63a8b3 | 1571 | dwc->block_size = |
161c3d04 | 1572 | (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1; |
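/*
 * Illustrative arithmetic for the decode above: a per-channel field
 * of 0x0 gives (4 << 0) - 1 = 3, and the maximum field of 0xa gives
 * (4 << 10) - 1 = 4095, matching the block-size range noted in the
 * comment.
 */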
fed2574b AS |
1573 | dwc->nollp = |
1574 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | |
1575 | } else { | |
4a63a8b3 | 1576 | dwc->block_size = pdata->block_size; |
fed2574b AS |
1577 | |
1578 | /* Check whether the channel supports multi-block transfers */ |
2a0fae02 MR |
1579 | channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff)); |
1580 | dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0; | |
fed2574b AS |
1581 | channel_writel(dwc, LLP, 0); |
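/*
 * The write/read-back above is a hardware probe: on channels that
 * were synthesized without linked-list (multi-block) support the
 * LLP.LOC field reads back as zero regardless of what is written,
 * so such channels are flagged "nollp" and restricted to
 * single-block transfers; the register is then restored to zero.
 */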
1582 | } | |
3bfb1d20 HS |
1583 | } |
1584 | ||
11f932ec | 1585 | /* Clear all interrupts on all channels. */ |
3bfb1d20 | 1586 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
236b106f | 1587 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
1588 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1589 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | |
1590 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | |
1591 | ||
df5c7386 | 1592 | /* Set capabilities */ |
3bfb1d20 | 1593 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
95ea759e JI |
1594 | if (pdata->is_private) |
1595 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | |
df5c7386 AS |
1596 | if (pdata->is_memcpy) |
1597 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | |
1598 | ||
9cade1a4 | 1599 | dw->dma.dev = chip->dev; |
3bfb1d20 HS |
1600 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1601 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | |
1602 | ||
1603 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | |
3bfb1d20 | 1604 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; |
029a40e9 | 1605 | |
a4b0d348 MR |
1606 | dw->dma.device_config = dwc_config; |
1607 | dw->dma.device_pause = dwc_pause; | |
1608 | dw->dma.device_resume = dwc_resume; | |
1609 | dw->dma.device_terminate_all = dwc_terminate_all; | |
3bfb1d20 | 1610 | |
07934481 | 1611 | dw->dma.device_tx_status = dwc_tx_status; |
3bfb1d20 HS |
1612 | dw->dma.device_issue_pending = dwc_issue_pending; |
1613 | ||
029a40e9 AS |
1614 | /* DMA capabilities */ |
1615 | dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; | |
1616 | dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; | |
1617 | dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | | |
1618 | BIT(DMA_MEM_TO_MEM); | |
1619 | dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
1620 | ||
1222934e AS |
1621 | err = dma_async_device_register(&dw->dma); |
1622 | if (err) | |
1623 | goto err_dma_register; | |
1624 | ||
9cade1a4 | 1625 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", |
30cb2639 | 1626 | pdata->nr_channels); |
3bfb1d20 | 1627 | |
bb32baf7 AS |
1628 | pm_runtime_put_sync_suspend(chip->dev); |
1629 | ||
3bfb1d20 | 1630 | return 0; |
8be4f523 | 1631 | |
1222934e AS |
1632 | err_dma_register: |
1633 | free_irq(chip->irq, dw); | |
8be4f523 | 1634 | err_pdata: |
bb32baf7 | 1635 | pm_runtime_put_sync_suspend(chip->dev); |
8be4f523 | 1636 | return err; |
3bfb1d20 | 1637 | } |
9cade1a4 | 1638 | EXPORT_SYMBOL_GPL(dw_dma_probe); |
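/*
 * A minimal sketch of how a bus glue driver might hand the controller
 * to this core. It uses only the struct dw_dma_chip fields referenced
 * in dw_dma_probe() above (dev, irq, regs, pdata); the platform device
 * lookups and error handling are schematic, not a definitive
 * implementation:
 *
 *	struct dw_dma_chip *chip;
 *	struct resource *mem;
 *	int ret;
 *
 *	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
 *	if (!chip)
 *		return -ENOMEM;
 *
 *	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
 *	chip->dev = &pdev->dev;
 *	chip->irq = platform_get_irq(pdev, 0);
 *	chip->pdata = dev_get_platdata(&pdev->dev);	// NULL means autocfg
 *
 *	ret = dw_dma_probe(chip);	// registers the dmaengine device
 *	...
 *	dw_dma_remove(chip);		// on driver removal
 */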
3bfb1d20 | 1639 | |
9cade1a4 | 1640 | int dw_dma_remove(struct dw_dma_chip *chip) |
3bfb1d20 | 1641 | { |
9cade1a4 | 1642 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1643 | struct dw_dma_chan *dwc, *_dwc; |
3bfb1d20 | 1644 | |
bb32baf7 AS |
1645 | pm_runtime_get_sync(chip->dev); |
1646 | ||
3bfb1d20 HS |
1647 | dw_dma_off(dw); |
1648 | dma_async_device_unregister(&dw->dma); | |
1649 | ||
97977f75 | 1650 | free_irq(chip->irq, dw); |
3bfb1d20 HS |
1651 | tasklet_kill(&dw->tasklet); |
1652 | ||
1653 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | |
1654 | chan.device_node) { | |
1655 | list_del(&dwc->chan.device_node); | |
1656 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1657 | } | |
1658 | ||
bb32baf7 | 1659 | pm_runtime_put_sync_suspend(chip->dev); |
3bfb1d20 HS |
1660 | return 0; |
1661 | } | |
9cade1a4 | 1662 | EXPORT_SYMBOL_GPL(dw_dma_remove); |
3bfb1d20 | 1663 | |
2540f74b | 1664 | int dw_dma_disable(struct dw_dma_chip *chip) |
3bfb1d20 | 1665 | { |
9cade1a4 | 1666 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1667 | |
6168d567 | 1668 | dw_dma_off(dw); |
3bfb1d20 HS |
1669 | return 0; |
1670 | } | |
2540f74b | 1671 | EXPORT_SYMBOL_GPL(dw_dma_disable); |
3bfb1d20 | 1672 | |
2540f74b | 1673 | int dw_dma_enable(struct dw_dma_chip *chip) |
3bfb1d20 | 1674 | { |
9cade1a4 | 1675 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1676 | |
7a83c045 | 1677 | dw_dma_on(dw); |
3bfb1d20 | 1678 | return 0; |
3bfb1d20 | 1679 | } |
2540f74b | 1680 | EXPORT_SYMBOL_GPL(dw_dma_enable); |
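/*
 * dw_dma_disable() and dw_dma_enable() are thin wrappers around the
 * global off/on switch, intended as the bus glue's power-management
 * hooks. A hedged suspend/resume sketch (how the chip pointer is
 * retrieved is hypothetical and depends on the glue driver's own
 * driver data):
 *
 *	static int dw_suspend_late(struct device *dev)
 *	{
 *		struct dw_dma_chip *chip = dev_get_drvdata(dev);
 *
 *		return dw_dma_disable(chip);
 *	}
 *
 *	static int dw_resume_early(struct device *dev)
 *	{
 *		struct dw_dma_chip *chip = dev_get_drvdata(dev);
 *
 *		return dw_dma_enable(chip);
 *	}
 */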
3bfb1d20 HS |
1681 | |
1682 | MODULE_LICENSE("GPL v2"); | |
9cade1a4 | 1683 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
e05503ef | 1684 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
da89947b | 1685 | MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); |