/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP            = 0,
        D40_DMA_RUN             = 1,
        D40_DMA_SUSPEND_REQ     = 2,
        D40_DMA_SUSPENDED       = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
        D40_DREG_PRMSO,
        D40_DREG_PRMOE,
        D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
static u32 d40_backup_regs_v3[] = {
        D40_DREG_PSEG1,
        D40_DREG_PSEG2,
        D40_DREG_PSEG3,
        D40_DREG_PSEG4,
        D40_DREG_PCEG1,
        D40_DREG_PCEG2,
        D40_DREG_PCEG3,
        D40_DREG_PCEG4,
        D40_DREG_RSEG1,
        D40_DREG_RSEG2,
        D40_DREG_RSEG3,
        D40_DREG_RSEG4,
        D40_DREG_RCEG1,
        D40_DREG_RCEG2,
        D40_DREG_RCEG3,
        D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)

static u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
        D40_CHAN_REG_SSLNK,
        D40_CHAN_REG_SDCFG,
        D40_CHAN_REG_SDELT,
        D40_CHAN_REG_SDPTR,
        D40_CHAN_REG_SDLNK,
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void    *base;
        int      size;
        dma_addr_t      dma_addr;
        /* Space for dst and src, plus an extra for padding */
        u8       pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir         lli_phy;
        /* LLI logical */
        struct d40_log_lli_bidir         lli_log;

        struct d40_lli_pool              lli_pool;
        int                              lli_len;
        int                              lli_current;
        int                              lcla_alloc;

        struct dma_async_tx_descriptor   txd;
        struct list_head                 node;

        bool                             is_in_client_list;
        bool                             cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address of the pool, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
        void            *base;
        dma_addr_t       dma_addr;
        void            *base_unaligned;
        int              pages;
        spinlock_t       lock;
        struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
        spinlock_t lock;
        bool       reserved;
        int        num;
        u32        allocated_src;
        u32        allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        spinlock_t                       lock;
        int                              log_num;
        /* ID of the most recent completed transfer */
        int                              completed;
        int                              pending_tx;
        bool                             busy;
        struct d40_phy_res              *phy_chan;
        struct dma_chan                  chan;
        struct tasklet_struct            tasklet;
        struct list_head                 client;
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 queue;
        struct list_head                 prepare_queue;
        struct stedma40_chan_cfg         dma_cfg;
        bool                             configured;
        struct d40_base                 *base;
        /* Default register configurations */
        u32                              src_def_cfg;
        u32                              dst_def_cfg;
        struct d40_def_lcsp              log_def;
        struct d40_log_lli_full         *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t                       runtime_addr;
        enum dma_transfer_direction      runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized
 */
struct d40_base {
        spinlock_t                       interrupt_lock;
        spinlock_t                       execmd_lock;
        struct device                   *dev;
        void __iomem                    *virtbase;
        u8                               rev:4;
        struct clk                      *clk;
        phys_addr_t                      phy_start;
        resource_size_t                  phy_size;
        int                              irq;
        int                              num_phy_chans;
        int                              num_log_chans;
        struct dma_device                dma_both;
        struct dma_device                dma_slave;
        struct dma_device                dma_memcpy;
        struct d40_chan                 *phy_chans;
        struct d40_chan                 *log_chans;
        struct d40_chan                **lookup_log_chans;
        struct d40_chan                **lookup_phy_chans;
        struct stedma40_platform_data   *plat_data;
        struct regulator                *lcpa_regulator;
        /* Physical half channels */
        struct d40_phy_res              *phy_res;
        struct d40_lcla_pool             lcla_pool;
        void                            *lcpa_base;
        dma_addr_t                       phy_lcpa;
        resource_size_t                  lcpa_size;
        struct kmem_cache               *desc_slab;
        u32                              reg_val_backup[BACKUP_REGS_SZ];
        u32                              reg_val_backup_v3[BACKUP_REGS_SZ_V3];
        u32                             *reg_val_backup_chan;
        u16                              gcc_pwr_off_mask;
        bool                             initialized;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
        u32 src;
        u32 clr;
        bool is_error;
        int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
        unsigned int reg;
        unsigned int val;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
        return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
        return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
        return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
        return chan->base->virtbase + D40_DREG_PCBASE +
               chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)            \
        dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)          \
        d40_err(chan2dev(d40c), format, ## arg)
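
/*
 * LLI pool handling: a one-link job (1 src + 1 dst LLI) fits in the
 * descriptor's embedded pre_alloc_lli buffer; longer jobs kmalloc() a
 * suitably aligned area instead. For physical channels the LLIs are
 * fetched by the DMAC itself, so the area is also mapped with
 * dma_map_single() for device access.
 */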
410 | ||
b00f938c | 411 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, |
dbd88788 | 412 | int lli_len) |
8d318a50 | 413 | { |
dbd88788 | 414 | bool is_log = chan_is_logical(d40c); |
8d318a50 LW |
415 | u32 align; |
416 | void *base; | |
417 | ||
418 | if (is_log) | |
419 | align = sizeof(struct d40_log_lli); | |
420 | else | |
421 | align = sizeof(struct d40_phy_lli); | |
422 | ||
423 | if (lli_len == 1) { | |
424 | base = d40d->lli_pool.pre_alloc_lli; | |
425 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); | |
426 | d40d->lli_pool.base = NULL; | |
427 | } else { | |
594ece4d | 428 | d40d->lli_pool.size = lli_len * 2 * align; |
8d318a50 LW |
429 | |
430 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); | |
431 | d40d->lli_pool.base = base; | |
432 | ||
433 | if (d40d->lli_pool.base == NULL) | |
434 | return -ENOMEM; | |
435 | } | |
436 | ||
437 | if (is_log) { | |
d924abad | 438 | d40d->lli_log.src = PTR_ALIGN(base, align); |
594ece4d | 439 | d40d->lli_log.dst = d40d->lli_log.src + lli_len; |
b00f938c RV |
440 | |
441 | d40d->lli_pool.dma_addr = 0; | |
8d318a50 | 442 | } else { |
d924abad | 443 | d40d->lli_phy.src = PTR_ALIGN(base, align); |
594ece4d | 444 | d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; |
b00f938c RV |
445 | |
446 | d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, | |
447 | d40d->lli_phy.src, | |
448 | d40d->lli_pool.size, | |
449 | DMA_TO_DEVICE); | |
450 | ||
451 | if (dma_mapping_error(d40c->base->dev, | |
452 | d40d->lli_pool.dma_addr)) { | |
453 | kfree(d40d->lli_pool.base); | |
454 | d40d->lli_pool.base = NULL; | |
455 | d40d->lli_pool.dma_addr = 0; | |
456 | return -ENOMEM; | |
457 | } | |
8d318a50 LW |
458 | } |
459 | ||
460 | return 0; | |
461 | } | |

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (d40d->lli_pool.dma_addr)
                dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
                                 d40d->lli_pool.size, DMA_TO_DEVICE);

        kfree(d40d->lli_pool.base);
        d40d->lli_pool.base = NULL;
        d40d->lli_pool.size = 0;
        d40d->lli_log.src = NULL;
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
}
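
/*
 * LCLA bookkeeping: every physical channel owns a window of
 * D40_LCLA_LINK_PER_EVENT_GRP entries in alloc_map. Entries are handed
 * out in src/dst pairs, so only half of the window is indexed, and
 * index 0 is never allocated because 0 doubles as the link end marker.
 */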
static int d40_lcla_alloc_one(struct d40_chan *d40c,
                              struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;
        int p;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

        /*
         * Allocate both src and dst at the same time; therefore the half
         * starts at 1, since 0 can't be used as it is the end marker.
         */
        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                if (!d40c->base->lcla_pool.alloc_map[p + i]) {
                        d40c->base->lcla_pool.alloc_map[p + i] = d40d;
                        d40d->lcla_alloc++;
                        ret = i;
                        break;
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
                             struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        if (chan_is_physical(d40c))
                return 0;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
                                D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
                        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
                                D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
                        d40d->lcla_alloc--;
                        if (d40d->lcla_alloc == 0) {
                                ret = 0;
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;

}

static void d40_desc_remove(struct d40_desc *d40d)
{
        list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
        struct d40_desc *desc = NULL;

        if (!list_empty(&d40c->client)) {
                struct d40_desc *d;
                struct d40_desc *_d;

                list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        if (async_tx_test_ack(&d->txd)) {
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
                                break;
                        }
                }
        }

        if (!desc)
                desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

        if (desc)
                INIT_LIST_HEAD(&desc->node);

        return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{

        d40_pool_lli_free(d40c, d40d);
        d40_lcla_free_all(d40c, d40d);
        kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
        struct d40_phy_lli *lli_src = desc->lli_phy.src;
        void __iomem *base = chan_base(chan);

        writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
        writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
        writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
        writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

        writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
        writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
        writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
        writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
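
/*
 * For logical channels the first link of a job is written directly into
 * the channel's slice of LCPA; any remaining links go into LCLA entries
 * allocated with d40_lcla_alloc_one(). Running out of LCLA entries is
 * not fatal: the list is simply loaded in chunks, with a terminal
 * interrupt per chunk so loading can continue from dma_tc_handle().
 */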
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_lcla_pool *pool = &chan->base->lcla_pool;
        struct d40_log_lli_bidir *lli = &desc->lli_log;
        int lli_current = desc->lli_current;
        int lli_len = desc->lli_len;
        bool cyclic = desc->cyclic;
        int curr_lcla = -EINVAL;
        int first_lcla = 0;
        bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
        bool linkback;

        /*
         * We may have partially running cyclic transfers, in case we didn't
         * get enough LCLA entries.
         */
        linkback = cyclic && lli_current == 0;

        /*
         * For linkback, we need one LCLA even with only one link, because we
         * can't link back to the one in LCPA space
         */
        if (linkback || (lli_len - lli_current > 1)) {
                curr_lcla = d40_lcla_alloc_one(chan, desc);
                first_lcla = curr_lcla;
        }

        /*
         * For linkback, we normally load the LCPA in the loop since we need to
         * link it to the second LCLA and not the first. However, if we
         * couldn't even get a first LCLA, then we have to run in LCPA and
         * reload manually.
         */
        if (!linkback || curr_lcla == -EINVAL) {
                unsigned int flags = 0;

                if (curr_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                d40_log_lli_lcpa_write(chan->lcpa,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       curr_lcla,
                                       flags);
                lli_current++;
        }

        if (curr_lcla < 0)
                goto out;

        for (; lli_current < lli_len; lli_current++) {
                unsigned int lcla_offset = chan->phy_chan->num * 1024 +
                                           8 * curr_lcla * 2;
                struct d40_log_lli *lcla = pool->base + lcla_offset;
                unsigned int flags = 0;
                int next_lcla;

                if (lli_current + 1 < lli_len)
                        next_lcla = d40_lcla_alloc_one(chan, desc);
                else
                        next_lcla = linkback ? first_lcla : -EINVAL;

                if (cyclic || next_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                if (linkback && curr_lcla == first_lcla) {
                        /* First link goes in both LCPA and LCLA */
                        d40_log_lli_lcpa_write(chan->lcpa,
                                               &lli->dst[lli_current],
                                               &lli->src[lli_current],
                                               next_lcla, flags);
                }

                /*
                 * One unused LCLA in the cyclic case if the very first
                 * next_lcla fails...
                 */
                d40_log_lli_lcla_write(lcla,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       next_lcla, flags);

                /*
                 * Cache maintenance is not needed if lcla is
                 * mapped in esram
                 */
                if (!use_esram_lcla) {
                        dma_sync_single_range_for_device(chan->base->dev,
                                                pool->dma_addr, lcla_offset,
                                                2 * sizeof(struct d40_log_lli),
                                                DMA_TO_DEVICE);
                }
                curr_lcla = next_lcla;

                if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
                        lli_current++;
                        break;
                }
        }

out:
        desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (chan_is_physical(d40c)) {
                d40_phy_lli_load(d40c, d40d);
                d40d->lli_current = d40d->lli_len;
        } else
                d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->active))
                return NULL;

        d = list_first_entry(&d40c->active,
                             struct d40_desc,
                             node);
        return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
        d40_desc_remove(desc);
        desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->pending_queue))
                return NULL;

        d = list_first_entry(&d40c->pending_queue,
                             struct d40_desc,
                             node);
        return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->queue))
                return NULL;

        d = list_first_entry(&d40c->queue,
                             struct d40_desc,
                             node);
        return d;
}
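
/*
 * Burst/element helpers. The psize encodings below map to a burst of
 * 2 << psize beats, with the *_PSIZE_*_1 values special-cased to a
 * single beat. A job larger than the hardware's per-element limit is
 * split into ceil(size / seg_max) dma elements; e.g. a buffer of
 * 3 * seg_max + 1 bytes needs 4 elements.
 */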
static int d40_psize_2_burst_size(bool is_log, int psize)
{
        if (is_log) {
                if (psize == STEDMA40_PSIZE_LOG_1)
                        return 1;
        } else {
                if (psize == STEDMA40_PSIZE_PHY_1)
                        return 1;
        }

        return 2 << psize;
}

/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
        int dmalen;
        u32 max_w = max(data_width1, data_width2);
        u32 min_w = min(data_width1, data_width2);
        u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

        if (seg_max > STEDMA40_MAX_SEG_SIZE)
                seg_max -= (1 << max_w);

        if (!IS_ALIGNED(size, 1 << max_w))
                return -EINVAL;

        if (size <= seg_max)
                dmalen = 1;
        else {
                dmalen = size / seg_max;
                if (dmalen * seg_max < size)
                        dmalen++;
        }
        return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
                           u32 data_width1, u32 data_width2)
{
        struct scatterlist *sg;
        int i;
        int len = 0;
        int ret;

        for_each_sg(sgl, sg, sg_len, i) {
                ret = d40_size_2_dmalen(sg_dma_len(sg),
                                        data_width1, data_width2);
                if (ret < 0)
                        return ret;
                len += ret;
        }
        return len;
}

#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
                         u32 *regaddr, int num, bool save)
{
        int i;

        for (i = 0; i < num; i++) {
                void __iomem *addr = baseaddr + regaddr[i];

                if (save)
                        backup[i] = readl_relaxed(addr);
                else
                        writel_relaxed(backup[i], addr);
        }
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
        int i;

        /* Save/Restore channel specific registers */
        for (i = 0; i < base->num_phy_chans; i++) {
                void __iomem *addr;
                int idx;

                if (base->phy_res[i].reserved)
                        continue;

                addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
                idx = i * ARRAY_SIZE(d40_backup_regs_chan);

                dma40_backup(addr, &base->reg_val_backup_chan[idx],
                             d40_backup_regs_chan,
                             ARRAY_SIZE(d40_backup_regs_chan),
                             save);
        }

        /* Save/Restore global registers */
        dma40_backup(base->virtbase, base->reg_val_backup,
                     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
                     save);

        /* Save/Restore registers only existing on dma40 v3 and later */
        if (base->rev >= 3)
                dma40_backup(base->virtbase, base->reg_val_backup_v3,
                             d40_backup_regs_v3,
                             ARRAY_SIZE(d40_backup_regs_v3),
                             save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif
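
/*
 * Channel commands go through the shared ACTIVE/ACTIVO registers, each
 * of which packs a 2-bit command/status field per physical channel;
 * execmd_lock serialises the read-modify-write. A suspend request is
 * polled for completion up to D40_SUSPEND_MAX_IT times.
 */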
static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
{
        u32 status;
        int i;
        void __iomem *active_reg;
        int ret = 0;
        unsigned long flags;
        u32 wmask;

        spin_lock_irqsave(&d40c->base->execmd_lock, flags);

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        if (command == D40_DMA_SUSPEND_REQ) {
                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                        D40_CHAN_POS(d40c->phy_chan->num);

                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
                        goto done;
        }

        wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
        writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
               active_reg);

        if (command == D40_DMA_SUSPEND_REQ) {

                for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
                        status = (readl(active_reg) &
                                  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                D40_CHAN_POS(d40c->phy_chan->num);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DMA_STOP ||
                            status == D40_DMA_SUSPENDED)
                                break;
                }

                if (i == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                 "unable to suspend the chl %d (log: %d) status %x\n",
                                 d40c->phy_chan->num, d40c->log_num,
                                 status);
                        dump_stack();
                        ret = -EBUSY;
                }

        }
done:
        spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
        return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        struct d40_desc *_d;

        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release pending descriptors */
        while ((d40d = d40_first_pending(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release client owned descriptors */
        if (!list_empty(&d40c->client))
                list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        /* Release descriptors in prepare queue */
        if (!list_empty(&d40c->prepare_queue))
                list_for_each_entry_safe(d40d, _d,
                                         &d40c->prepare_queue, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        d40c->pending_tx = 0;
        d40c->busy = false;
}

static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
                                   u32 event, int reg)
{
        void __iomem *addr = chan_base(d40c) + reg;
        int tries;

        if (!enable) {
                writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);
                return;
        }

        /*
         * The hardware sometimes doesn't register the enable when src and dst
         * event lines are active on the same logical channel. Retry to ensure
         * it does. Usually only one retry is sufficient.
         */
        tries = 100;
        while (--tries) {
                writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);

                if (readl(addr) & D40_EVENTLINE_MASK(event))
                        break;
        }

        if (tries != 99)
                dev_dbg(chan2dev(d40c),
                        "[%s] workaround enable S%cLNK (%d tries)\n",
                        __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
                        100 - tries);

        WARN_ON(!tries);
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
        unsigned long flags;

        spin_lock_irqsave(&d40c->phy_chan->lock, flags);

        /* Enable event line connected to device (or memcpy) */
        if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
            (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

                __d40_config_set_event(d40c, do_enable, event,
                                       D40_CHAN_REG_SSLNK);
        }

        if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

                __d40_config_set_event(d40c, do_enable, event,
                                       D40_CHAN_REG_SDLNK);
        }

        spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
        void __iomem *chanbase = chan_base(d40c);
        u32 val;

        val = readl(chanbase + D40_CHAN_REG_SSLNK);
        val |= readl(chanbase + D40_CHAN_REG_SDLNK);

        return val;
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
        static const unsigned int phy_map[] = {
                [STEDMA40_PCHAN_BASIC_MODE]
                        = D40_DREG_PRMO_PCHAN_BASIC,
                [STEDMA40_PCHAN_MODULO_MODE]
                        = D40_DREG_PRMO_PCHAN_MODULO,
                [STEDMA40_PCHAN_DOUBLE_DST_MODE]
                        = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
        };
        static const unsigned int log_map[] = {
                [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
                [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
                [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
        };

        if (chan_is_physical(d40c))
                return phy_map[d40c->dma_cfg.mode_opt];
        else
                return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
        u32 addr_base;
        u32 var;

        /* Odd addresses are even addresses + 4 */
        addr_base = (d40c->phy_chan->num % 2) * 4;
        /* Setup channel mode to logical or physical */
        var = ((u32)(chan_is_logical(d40c)) + 1) <<
                D40_CHAN_POS(d40c->phy_chan->num);
        writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

        /* Setup operational mode option register */
        var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

        writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

        if (chan_is_logical(d40c)) {
                int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
                           & D40_SREG_ELEM_LOG_LIDX_MASK;
                void __iomem *chanbase = chan_base(d40c);

                /* Set default config for CFG reg */
                writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
                writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

                /* Set LIDX for lcla */
                writel(lidx, chanbase + D40_CHAN_REG_SSELT);
                writel(lidx, chanbase + D40_CHAN_REG_SDELT);

                /* Clear LNK which will be used by d40_chan_has_events() */
                writel(0, chanbase + D40_CHAN_REG_SSLNK);
                writel(0, chanbase + D40_CHAN_REG_SDLNK);
        }
}
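
/*
 * The residue is derived from the hardware element counter: the number
 * of elements still to be transferred (ECNT) times the configured
 * destination element width in bytes.
 */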
static u32 d40_residue(struct d40_chan *d40c)
{
        u32 num_elt;

        if (chan_is_logical(d40c))
                num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
                        >> D40_MEM_LCSP2_ECNT_POS;
        else {
                u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
                num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
                          >> D40_SREG_ELEM_PHY_ECNT_POS;
        }

        return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
        bool is_link;

        if (chan_is_logical(d40c))
                is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
        else
                is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
                          & D40_SREG_LNK_PHYS_LNK_MASK;

        return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
        int res = 0;
        unsigned long flags;

        if (!d40c->busy)
                return 0;

        pm_runtime_get_sync(d40c->base->dev);
        spin_lock_irqsave(&d40c->lock, flags);

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res == 0) {
                if (chan_is_logical(d40c)) {
                        d40_config_set_event(d40c, false);
                        /* Resume the other logical channels if any */
                        if (d40_chan_has_events(d40c))
                                res = d40_channel_execute_command(d40c,
                                                                  D40_DMA_RUN);
                }
        }
        pm_runtime_mark_last_busy(d40c->base->dev);
        pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
}

static int d40_resume(struct d40_chan *d40c)
{
        int res = 0;
        unsigned long flags;

        if (!d40c->busy)
                return 0;

        spin_lock_irqsave(&d40c->lock, flags);
        pm_runtime_get_sync(d40c->base->dev);
        if (d40c->base->rev == 0)
                if (chan_is_logical(d40c)) {
                        res = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);
                        goto no_suspend;
                }

        /* If bytes left to transfer or linked tx resume job */
        if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

                if (chan_is_logical(d40c))
                        d40_config_set_event(d40c, true);

                res = d40_channel_execute_command(d40c, D40_DMA_RUN);
        }

no_suspend:
        pm_runtime_mark_last_busy(d40c->base->dev);
        pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
}

static int d40_terminate_all(struct d40_chan *chan)
{
        unsigned long flags;
        int ret = 0;

        ret = d40_pause(chan);
        if (!ret && chan_is_physical(chan))
                ret = d40_channel_execute_command(chan, D40_DMA_STOP);

        spin_lock_irqsave(&chan->lock, flags);
        d40_term_all(chan);
        spin_unlock_irqrestore(&chan->lock, flags);

        return ret;
}
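
/*
 * tx_submit only assigns a cookie and moves the descriptor to the
 * pending_queue; nothing is started here. The job is started later,
 * once the client calls issue_pending() (see @pending_queue above).
 */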
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct d40_chan *d40c = container_of(tx->chan,
                                             struct d40_chan,
                                             chan);
        struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);

        d40c->chan.cookie++;

        if (d40c->chan.cookie < 0)
                d40c->chan.cookie = 1;

        d40d->txd.cookie = d40c->chan.cookie;

        d40_desc_queue(d40c, d40d);

        spin_unlock_irqrestore(&d40c->lock, flags);

        return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
        if (d40c->base->rev == 0) {
                int err;

                if (chan_is_logical(d40c)) {
                        err = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);
                        if (err)
                                return err;
                }
        }

        if (chan_is_logical(d40c))
                d40_config_set_event(d40c, true);

        return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        int err;

        /* Start queued jobs, if any */
        d40d = d40_first_queued(d40c);

        if (d40d != NULL) {
                if (!d40c->busy)
                        d40c->busy = true;

                pm_runtime_get_sync(d40c->base->dev);

                /* Remove from queue */
                d40_desc_remove(d40d);

                /* Add to active queue */
                d40_desc_submit(d40c, d40d);

                /* Initiate DMA job */
                d40_desc_load(d40c, d40d);

                /* Start dma job */
                err = d40_start(d40c);

                if (err)
                        return NULL;
        }

        return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
        struct d40_desc *d40d;

        /* Get first active entry from list */
        d40d = d40_first_active_get(d40c);

        if (d40d == NULL)
                return;

        if (d40d->cyclic) {
                /*
                 * If this was a partially loaded list, we need to reload
                 * it, but only when the list is completed. We need to check
                 * for done because the interrupt will hit for every link, and
                 * not just the last one.
                 */
                if (d40d->lli_current < d40d->lli_len
                    && !d40_tx_is_linked(d40c)
                    && !d40_residue(d40c)) {
                        d40_lcla_free_all(d40c, d40d);
                        d40_desc_load(d40c, d40d);
                        (void) d40_start(d40c);

                        if (d40d->lli_current == d40d->lli_len)
                                d40d->lli_current = 0;
                }
        } else {
                d40_lcla_free_all(d40c, d40d);

                if (d40d->lli_current < d40d->lli_len) {
                        d40_desc_load(d40c, d40d);
                        /* Start dma job */
                        (void) d40_start(d40c);
                        return;
                }

                if (d40_queue_start(d40c) == NULL)
                        d40c->busy = false;
                pm_runtime_mark_last_busy(d40c->base->dev);
                pm_runtime_put_autosuspend(d40c->base->dev);
        }

        d40c->pending_tx++;
        tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
        struct d40_chan *d40c = (struct d40_chan *) data;
        struct d40_desc *d40d;
        unsigned long flags;
        dma_async_tx_callback callback;
        void *callback_param;

        spin_lock_irqsave(&d40c->lock, flags);

        /* Get first active entry from list */
        d40d = d40_first_active_get(d40c);
        if (d40d == NULL)
                goto err;

        if (!d40d->cyclic)
                d40c->completed = d40d->txd.cookie;

        /*
         * If terminating a channel pending_tx is set to zero.
         * This prevents any finished active jobs from returning to the client.
         */
        if (d40c->pending_tx == 0) {
                spin_unlock_irqrestore(&d40c->lock, flags);
                return;
        }

        /* Callback to client */
        callback = d40d->txd.callback;
        callback_param = d40d->txd.callback_param;

        if (!d40d->cyclic) {
                if (async_tx_test_ack(&d40d->txd)) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                } else {
                        if (!d40d->is_in_client_list) {
                                d40_desc_remove(d40d);
                                d40_lcla_free_all(d40c, d40d);
                                list_add_tail(&d40d->node, &d40c->client);
                                d40d->is_in_client_list = true;
                        }
                }
        }

        d40c->pending_tx--;

        if (d40c->pending_tx)
                tasklet_schedule(&d40c->tasklet);

        spin_unlock_irqrestore(&d40c->lock, flags);

        if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
                callback(callback_param);

        return;

err:
        /* Rescue manoeuvre if receiving double interrupts */
        if (d40c->pending_tx > 0)
                d40c->pending_tx--;
        spin_unlock_irqrestore(&d40c->lock, flags);
}
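
/*
 * One IRQ serves all channels. The il[] table below maps each 32-bit
 * status register (logical TC/error rows plus the physical rows) to its
 * clear register and to an offset into the channel lookup arrays; the
 * handler scans all the registers as one long bitmask and acks each
 * source individually.
 */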
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
        static const struct d40_interrupt_lookup il[] = {
                {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
                {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
                {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
                {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
                {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
                {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
                {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
                {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
                {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
                {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
        };

        int i;
        u32 regs[ARRAY_SIZE(il)];
        u32 idx;
        u32 row;
        long chan = -1;
        struct d40_chan *d40c;
        unsigned long flags;
        struct d40_base *base = data;

        spin_lock_irqsave(&base->interrupt_lock, flags);

        /* Read interrupt status of both logical and physical channels */
        for (i = 0; i < ARRAY_SIZE(il); i++)
                regs[i] = readl(base->virtbase + il[i].src);

        for (;;) {

                chan = find_next_bit((unsigned long *)regs,
                                     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

                /* No more set bits found? */
                if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
                        break;

                row = chan / BITS_PER_LONG;
                idx = chan & (BITS_PER_LONG - 1);

                /* ACK interrupt */
                writel(1 << idx, base->virtbase + il[row].clr);

                if (il[row].offset == D40_PHY_CHAN)
                        d40c = base->lookup_phy_chans[idx];
                else
                        d40c = base->lookup_log_chans[il[row].offset + idx];
                spin_lock(&d40c->lock);

                if (!il[row].is_error)
                        dma_tc_handle(d40c);
                else
                        d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
                                chan, il[row].offset, idx);

                spin_unlock(&d40c->lock);
        }

        spin_unlock_irqrestore(&base->interrupt_lock, flags);

        return IRQ_HANDLED;
}
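
/*
 * Sanity check a client configuration. Note the (burst x width) rule at
 * the end: the DMAC moves the same number of bytes per element on both
 * sides, so e.g. a byte-wide source with burst 8 can pair with a
 * word-wide destination with burst 2 (8 x 1 == 2 x 4 bytes).
 */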
static int d40_validate_conf(struct d40_chan *d40c,
                             struct stedma40_chan_cfg *conf)
{
        int res = 0;
        u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
        u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
        bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

        if (!conf->dir) {
                chan_err(d40c, "Invalid direction.\n");
                res = -EINVAL;
        }

        if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
            d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
            d40c->runtime_addr == 0) {

                chan_err(d40c, "Invalid TX channel address (%d)\n",
                         conf->dst_dev_type);
                res = -EINVAL;
        }

        if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
            d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
            d40c->runtime_addr == 0) {
                chan_err(d40c, "Invalid RX channel address (%d)\n",
                         conf->src_dev_type);
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY) {
                chan_err(d40c, "Invalid dst\n");
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
            src_event_group == STEDMA40_DEV_SRC_MEMORY) {
                chan_err(d40c, "Invalid src\n");
                res = -EINVAL;
        }

        if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
                chan_err(d40c, "No event line\n");
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
            (src_event_group != dst_event_group)) {
                chan_err(d40c, "Invalid event group\n");
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
                /*
                 * DMAC HW supports it. Will be added to this driver,
                 * in case any dma client requires it.
                 */
                chan_err(d40c, "periph to periph not supported\n");
                res = -EINVAL;
        }

        if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
            (1 << conf->src_info.data_width) !=
            d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
            (1 << conf->dst_info.data_width)) {
                /*
                 * The DMAC hardware only supports
                 * src (burst x width) == dst (burst x width)
                 */

                chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
                res = -EINVAL;
        }

        return res;
}
1551 | ||
5cd326fd N |
1552 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, |
1553 | bool is_src, int log_event_line, bool is_log, | |
1554 | bool *first_user) | |
8d318a50 LW |
1555 | { |
1556 | unsigned long flags; | |
1557 | spin_lock_irqsave(&phy->lock, flags); | |
5cd326fd N |
1558 | |
1559 | *first_user = ((phy->allocated_src | phy->allocated_dst) | |
1560 | == D40_ALLOC_FREE); | |
1561 | ||
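/*
 * Allocation map semantics: a physical channel claims both the src
 * and dst halves outright (D40_ALLOC_PHY), while logical channels
 * share a physical channel by setting one bit per event line in
 * the relevant half.
 */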
4aed79b2 | 1562 | if (!is_log) { |
8d318a50 LW |
1563 | /* Physical interrupts are masked per physical full channel */ |
1564 | if (phy->allocated_src == D40_ALLOC_FREE && | |
1565 | phy->allocated_dst == D40_ALLOC_FREE) { | |
1566 | phy->allocated_dst = D40_ALLOC_PHY; | |
1567 | phy->allocated_src = D40_ALLOC_PHY; | |
1568 | goto found; | |
1569 | } else | |
1570 | goto not_found; | |
1571 | } | |
1572 | ||
1573 | /* Logical channel */ | |
1574 | if (is_src) { | |
1575 | if (phy->allocated_src == D40_ALLOC_PHY) | |
1576 | goto not_found; | |
1577 | ||
1578 | if (phy->allocated_src == D40_ALLOC_FREE) | |
1579 | phy->allocated_src = D40_ALLOC_LOG_FREE; | |
1580 | ||
1581 | if (!(phy->allocated_src & (1 << log_event_line))) { | |
1582 | phy->allocated_src |= 1 << log_event_line; | |
1583 | goto found; | |
1584 | } else | |
1585 | goto not_found; | |
1586 | } else { | |
1587 | if (phy->allocated_dst == D40_ALLOC_PHY) | |
1588 | goto not_found; | |
1589 | ||
1590 | if (phy->allocated_dst == D40_ALLOC_FREE) | |
1591 | phy->allocated_dst = D40_ALLOC_LOG_FREE; | |
1592 | ||
1593 | if (!(phy->allocated_dst & (1 << log_event_line))) { | |
1594 | phy->allocated_dst |= 1 << log_event_line; | |
1595 | goto found; | |
1596 | } else | |
1597 | goto not_found; | |
1598 | } | |
1599 | ||
1600 | not_found: | |
1601 | spin_unlock_irqrestore(&phy->lock, flags); | |
1602 | return false; | |
1603 | found: | |
1604 | spin_unlock_irqrestore(&phy->lock, flags); | |
1605 | return true; | |
1606 | } | |
1607 | ||
1608 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, | |
1609 | int log_event_line) | |
1610 | { | |
1611 | unsigned long flags; | |
1612 | bool is_free = false; | |
1613 | ||
1614 | spin_lock_irqsave(&phy->lock, flags); | |
1615 | if (!log_event_line) { | |
8d318a50 LW |
1616 | phy->allocated_dst = D40_ALLOC_FREE; |
1617 | phy->allocated_src = D40_ALLOC_FREE; | |
1618 | is_free = true; | |
1619 | goto out; | |
1620 | } | |
1621 | ||
1622 | /* Logical channel */ | |
1623 | if (is_src) { | |
1624 | phy->allocated_src &= ~(1 << log_event_line); | |
1625 | if (phy->allocated_src == D40_ALLOC_LOG_FREE) | |
1626 | phy->allocated_src = D40_ALLOC_FREE; | |
1627 | } else { | |
1628 | phy->allocated_dst &= ~(1 << log_event_line); | |
1629 | if (phy->allocated_dst == D40_ALLOC_LOG_FREE) | |
1630 | phy->allocated_dst = D40_ALLOC_FREE; | |
1631 | } | |
1632 | ||
1633 | is_free = ((phy->allocated_src | phy->allocated_dst) == | |
1634 | D40_ALLOC_FREE); | |
1635 | ||
1636 | out: | |
1637 | spin_unlock_irqrestore(&phy->lock, flags); | |
1638 | ||
1639 | return is_free; | |
1640 | } | |
1641 | ||
5cd326fd | 1642 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) |
8d318a50 LW |
1643 | { |
1644 | int dev_type; | |
1645 | int event_group; | |
1646 | int event_line; | |
1647 | struct d40_phy_res *phys; | |
1648 | int i; | |
1649 | int j; | |
1650 | int log_num; | |
1651 | bool is_src; | |
38bdbf02 | 1652 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
8d318a50 LW |
1653 | |
1654 | phys = d40c->base->phy_res; | |
1655 | ||
1656 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | |
1657 | dev_type = d40c->dma_cfg.src_dev_type; | |
1658 | log_num = 2 * dev_type; | |
1659 | is_src = true; | |
1660 | } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | |
1661 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | |
1662 | /* dst event lines are used for logical memcpy */ | |
1663 | dev_type = d40c->dma_cfg.dst_dev_type; | |
1664 | log_num = 2 * dev_type + 1; | |
1665 | is_src = false; | |
1666 | } else | |
1667 | return -EINVAL; | |
1668 | ||
1669 | event_group = D40_TYPE_TO_GROUP(dev_type); | |
1670 | event_line = D40_TYPE_TO_EVENT(dev_type); | |
1671 | ||
1672 | if (!is_log) { | |
1673 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | |
1674 | /* Find physical half channel */ | |
1675 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | |
1676 | ||
4aed79b2 | 1677 | if (d40_alloc_mask_set(&phys[i], is_src, |
5cd326fd N |
1678 | 0, is_log, |
1679 | first_phy_user)) | |
8d318a50 LW |
1680 | goto found_phy; |
1681 | } | |
1682 | } else | |
1683 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | |
1684 | int phy_num = j + event_group * 2; | |
1685 | for (i = phy_num; i < phy_num + 2; i++) { | |
508849ad LW |
1686 | if (d40_alloc_mask_set(&phys[i], |
1687 | is_src, | |
1688 | 0, | |
5cd326fd N |
1689 | is_log, |
1690 | first_phy_user)) | |
8d318a50 LW |
1691 | goto found_phy; |
1692 | } | |
1693 | } | |
1694 | return -EINVAL; | |
1695 | found_phy: | |
1696 | d40c->phy_chan = &phys[i]; | |
1697 | d40c->log_num = D40_PHY_CHAN; | |
1698 | goto out; | |
1699 | } | |
1700 | if (dev_type == -1) | |
1701 | return -EINVAL; | |
1702 | ||
1703 | /* Find logical channel */ | |
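/*
 * The loop below relies on phy_num = j + event_group * 2: within
 * every bank of eight physical channels, each event group owns a
 * fixed pair of channels.
 */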
1704 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | |
1705 | int phy_num = j + event_group * 2; | |
5cd326fd N |
1706 | |
1707 | if (d40c->dma_cfg.use_fixed_channel) { | |
1708 | i = d40c->dma_cfg.phy_channel; | |
1709 | ||
1710 | if ((i != phy_num) && (i != phy_num + 1)) { | |
1711 | dev_err(chan2dev(d40c), | |
1712 | "invalid fixed phy channel %d\n", i); | |
1713 | return -EINVAL; | |
1714 | } | |
1715 | ||
1716 | if (d40_alloc_mask_set(&phys[i], is_src, event_line, | |
1717 | is_log, first_phy_user)) | |
1718 | goto found_log; | |
1719 | ||
1720 | dev_err(chan2dev(d40c), | |
1721 | "could not allocate fixed phy channel %d\n", i); | |
1722 | return -EINVAL; | |
1723 | } | |
1724 | ||
8d318a50 LW |
1725 | /* |
1726 | * Spread logical channels across all available physical channels | 
1727 | * rather than packing every logical channel onto the first | 
1728 | * available one. | 
1729 | */ | |
1730 | if (is_src) { | |
1731 | for (i = phy_num; i < phy_num + 2; i++) { | |
1732 | if (d40_alloc_mask_set(&phys[i], is_src, | |
5cd326fd N |
1733 | event_line, is_log, |
1734 | first_phy_user)) | |
8d318a50 LW |
1735 | goto found_log; |
1736 | } | |
1737 | } else { | |
1738 | for (i = phy_num + 1; i >= phy_num; i--) { | |
1739 | if (d40_alloc_mask_set(&phys[i], is_src, | |
5cd326fd N |
1740 | event_line, is_log, |
1741 | first_phy_user)) | |
8d318a50 LW |
1742 | goto found_log; |
1743 | } | |
1744 | } | |
1745 | } | |
1746 | return -EINVAL; | |
1747 | ||
1748 | found_log: | |
1749 | d40c->phy_chan = &phys[i]; | |
1750 | d40c->log_num = log_num; | |
1751 | out: | |
1752 | ||
1753 | if (is_log) | |
1754 | d40c->base->lookup_log_chans[d40c->log_num] = d40c; | |
1755 | else | |
1756 | d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; | |
1757 | ||
1758 | return 0; | |
1759 | ||
1760 | } | |
1761 | ||
8d318a50 LW |
1762 | static int d40_config_memcpy(struct d40_chan *d40c) |
1763 | { | |
1764 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; | |
1765 | ||
1766 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { | |
1767 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; | |
1768 | d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; | |
1769 | d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> | |
1770 | memcpy[d40c->chan.chan_id]; | |
1771 | ||
1772 | } else if (dma_has_cap(DMA_MEMCPY, cap) && | |
1773 | dma_has_cap(DMA_SLAVE, cap)) { | |
1774 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | |
1775 | } else { | |
6db5a8ba | 1776 | chan_err(d40c, "No memcpy\n"); |
8d318a50 LW |
1777 | return -EINVAL; |
1778 | } | |
1779 | ||
1780 | return 0; | |
1781 | } | |
1782 | ||
1783 | ||
1784 | static int d40_free_dma(struct d40_chan *d40c) | |
1785 | { | |
1786 | ||
1787 | int res = 0; | |
d181b3a8 | 1788 | u32 event; |
8d318a50 LW |
1789 | struct d40_phy_res *phy = d40c->phy_chan; |
1790 | bool is_src; | |
1791 | ||
1792 | /* Terminate all queued and active transfers */ | |
1793 | d40_term_all(d40c); | |
1794 | ||
1795 | if (phy == NULL) { | |
6db5a8ba | 1796 | chan_err(d40c, "phy == null\n"); |
8d318a50 LW |
1797 | return -EINVAL; |
1798 | } | |
1799 | ||
1800 | if (phy->allocated_src == D40_ALLOC_FREE && | |
1801 | phy->allocated_dst == D40_ALLOC_FREE) { | |
6db5a8ba | 1802 | chan_err(d40c, "channel already free\n"); |
8d318a50 LW |
1803 | return -EINVAL; |
1804 | } | |
1805 | ||
8d318a50 LW |
1806 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1807 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | |
1808 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | |
8d318a50 LW |
1809 | is_src = false; |
1810 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | |
1811 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | |
8d318a50 LW |
1812 | is_src = true; |
1813 | } else { | |
6db5a8ba | 1814 | chan_err(d40c, "Unknown direction\n"); |
8d318a50 LW |
1815 | return -EINVAL; |
1816 | } | |
1817 | ||
7fb3e75e | 1818 | pm_runtime_get_sync(d40c->base->dev); |
d181b3a8 JA |
1819 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1820 | if (res) { | |
6db5a8ba | 1821 | chan_err(d40c, "suspend failed\n"); |
7fb3e75e | 1822 | goto out; |
d181b3a8 JA |
1823 | } |
1824 | ||
724a8577 | 1825 | if (chan_is_logical(d40c)) { |
d181b3a8 | 1826 | /* Release logical channel, deactivate the event line */ |
8d318a50 | 1827 | |
d181b3a8 | 1828 | d40_config_set_event(d40c, false); |
8d318a50 LW |
1829 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
1830 | ||
1831 | /* | |
1832 | * Check if there are more logical allocations | 
1833 | * on this phy channel. | 
1834 | */ | |
1835 | if (!d40_alloc_mask_free(phy, is_src, event)) { | |
1836 | /* Resume the other logical channels if any */ | |
1837 | if (d40_chan_has_events(d40c)) { | |
1838 | res = d40_channel_execute_command(d40c, | |
1839 | D40_DMA_RUN); | |
7fb3e75e | 1840 | if (res) |
6db5a8ba RV |
1841 | chan_err(d40c, |
1842 | "Executing RUN command\n"); | |
8d318a50 | 1843 | } |
7fb3e75e | 1844 | goto out; |
8d318a50 | 1845 | } |
d181b3a8 JA |
1846 | } else { |
1847 | (void) d40_alloc_mask_free(phy, is_src, 0); | |
1848 | } | |
8d318a50 LW |
1849 | |
1850 | /* Release physical channel */ | |
1851 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | |
1852 | if (res) { | |
6db5a8ba | 1853 | chan_err(d40c, "Failed to stop channel\n"); |
7fb3e75e | 1854 | goto out; |
8d318a50 | 1855 | } |
7fb3e75e N |
1856 | |
1857 | if (d40c->busy) { | |
1858 | pm_runtime_mark_last_busy(d40c->base->dev); | |
1859 | pm_runtime_put_autosuspend(d40c->base->dev); | |
1860 | } | |
1861 | ||
1862 | d40c->busy = false; | |
8d318a50 | 1863 | d40c->phy_chan = NULL; |
ce2ca125 | 1864 | d40c->configured = false; |
8d318a50 | 1865 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
7fb3e75e | 1866 | out: |
8d318a50 | 1867 | |
7fb3e75e N |
1868 | pm_runtime_mark_last_busy(d40c->base->dev); |
1869 | pm_runtime_put_autosuspend(d40c->base->dev); | |
1870 | return res; | |
8d318a50 LW |
1871 | } |
1872 | ||
a5ebca47 JA |
1873 | static bool d40_is_paused(struct d40_chan *d40c) |
1874 | { | |
8ca84687 | 1875 | void __iomem *chanbase = chan_base(d40c); |
a5ebca47 JA |
1876 | bool is_paused = false; |
1877 | unsigned long flags; | |
1878 | void __iomem *active_reg; | |
1879 | u32 status; | |
1880 | u32 event; | |
a5ebca47 JA |
1881 | |
1882 | spin_lock_irqsave(&d40c->lock, flags); | |
1883 | ||
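/*
 * Physical channel status is a 2-bit field per channel; even
 * channel numbers live in the ACTIVE register, odd ones in ACTIVO.
 */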
724a8577 | 1884 | if (chan_is_physical(d40c)) { |
a5ebca47 JA |
1885 | if (d40c->phy_chan->num % 2 == 0) |
1886 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | |
1887 | else | |
1888 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | |
1889 | ||
1890 | status = (readl(active_reg) & | |
1891 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | |
1892 | D40_CHAN_POS(d40c->phy_chan->num); | |
1893 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | |
1894 | is_paused = true; | |
1895 | ||
1896 | goto _exit; | |
1897 | } | |
1898 | ||
a5ebca47 | 1899 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
9dbfbd35 | 1900 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
a5ebca47 | 1901 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
8ca84687 | 1902 | status = readl(chanbase + D40_CHAN_REG_SDLNK); |
9dbfbd35 | 1903 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
a5ebca47 | 1904 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
8ca84687 | 1905 | status = readl(chanbase + D40_CHAN_REG_SSLNK); |
9dbfbd35 | 1906 | } else { |
6db5a8ba | 1907 | chan_err(d40c, "Unknown direction\n"); |
a5ebca47 JA |
1908 | goto _exit; |
1909 | } | |
9dbfbd35 | 1910 | |
a5ebca47 JA |
1911 | status = (status & D40_EVENTLINE_MASK(event)) >> |
1912 | D40_EVENTLINE_POS(event); | |
1913 | ||
1914 | if (status != D40_DMA_RUN) | |
1915 | is_paused = true; | |
a5ebca47 JA |
1916 | _exit: |
1917 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1918 | return is_paused; | |
1919 | ||
1920 | } | |
1921 | ||
1922 | ||
8d318a50 LW |
1923 | static u32 stedma40_residue(struct dma_chan *chan) |
1924 | { | |
1925 | struct d40_chan *d40c = | |
1926 | container_of(chan, struct d40_chan, chan); | |
1927 | u32 bytes_left; | |
1928 | unsigned long flags; | |
1929 | ||
1930 | spin_lock_irqsave(&d40c->lock, flags); | |
1931 | bytes_left = d40_residue(d40c); | |
1932 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1933 | ||
1934 | return bytes_left; | |
1935 | } | |
1936 | ||
3e3a0763 RV |
1937 | static int |
1938 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, | |
1939 | struct scatterlist *sg_src, struct scatterlist *sg_dst, | |
822c5676 RV |
1940 | unsigned int sg_len, dma_addr_t src_dev_addr, |
1941 | dma_addr_t dst_dev_addr) | |
3e3a0763 RV |
1942 | { |
1943 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | |
1944 | struct stedma40_half_channel_info *src_info = &cfg->src_info; | |
1945 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | |
5ed04b85 | 1946 | int ret; |
3e3a0763 | 1947 | |
5ed04b85 RV |
1948 | ret = d40_log_sg_to_lli(sg_src, sg_len, |
1949 | src_dev_addr, | |
1950 | desc->lli_log.src, | |
1951 | chan->log_def.lcsp1, | |
1952 | src_info->data_width, | |
1953 | dst_info->data_width); | 
/* Don't let an error from the src conversion be silently overwritten. */
if (ret < 0)
return ret;
1954 | | 
1955 | ret = d40_log_sg_to_lli(sg_dst, sg_len, | |
1956 | dst_dev_addr, | |
1957 | desc->lli_log.dst, | |
1958 | chan->log_def.lcsp3, | |
1959 | dst_info->data_width, | |
1960 | src_info->data_width); | |
1961 | ||
1962 | return ret < 0 ? ret : 0; | |
3e3a0763 RV |
1963 | } |
1964 | ||
1965 | static int | |
1966 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | |
1967 | struct scatterlist *sg_src, struct scatterlist *sg_dst, | |
822c5676 RV |
1968 | unsigned int sg_len, dma_addr_t src_dev_addr, |
1969 | dma_addr_t dst_dev_addr) | |
3e3a0763 | 1970 | { |
3e3a0763 RV |
1971 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1972 | struct stedma40_half_channel_info *src_info = &cfg->src_info; | |
1973 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | |
0c842b55 | 1974 | unsigned long flags = 0; |
3e3a0763 RV |
1975 | int ret; |
1976 | ||
0c842b55 RV |
1977 | if (desc->cyclic) |
1978 | flags |= LLI_CYCLIC | LLI_TERM_INT; | |
1979 | ||
3e3a0763 RV |
1980 | ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, |
1981 | desc->lli_phy.src, | |
1982 | virt_to_phys(desc->lli_phy.src), | |
1983 | chan->src_def_cfg, | |
0c842b55 | 1984 | src_info, dst_info, flags); |
3e3a0763 RV |
/* As above, don't let an error from the src conversion be lost. */
if (ret < 0)
return ret;
1985 | | 
1986 | ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, | |
1987 | desc->lli_phy.dst, | |
1988 | virt_to_phys(desc->lli_phy.dst), | |
1989 | chan->dst_def_cfg, | |
0c842b55 | 1990 | dst_info, src_info, flags); |
3e3a0763 RV |
1991 | |
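/*
 * The physical LLIs were written via the CPU; flush them out to
 * memory so the DMAC reads up-to-date descriptors.
 */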
1992 | dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, | |
1993 | desc->lli_pool.size, DMA_TO_DEVICE); | |
1994 | ||
1995 | return ret < 0 ? ret : 0; | |
1996 | } | |
1997 | ||
1998 | ||
5f81158f RV |
1999 | static struct d40_desc * |
2000 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | |
2001 | unsigned int sg_len, unsigned long dma_flags) | |
2002 | { | |
2003 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | |
2004 | struct d40_desc *desc; | |
dbd88788 | 2005 | int ret; |
5f81158f RV |
2006 | |
2007 | desc = d40_desc_get(chan); | |
2008 | if (!desc) | |
2009 | return NULL; | |
2010 | ||
2011 | desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, | |
2012 | cfg->dst_info.data_width); | |
2013 | if (desc->lli_len < 0) { | |
2014 | chan_err(chan, "Unaligned size\n"); | |
dbd88788 RV |
2015 | goto err; |
2016 | } | |
5f81158f | 2017 | |
dbd88788 RV |
2018 | ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); |
2019 | if (ret < 0) { | |
2020 | chan_err(chan, "Could not allocate lli\n"); | |
2021 | goto err; | |
5f81158f RV |
2022 | } |
2023 | ||
dbd88788 | 2024 | |
5f81158f RV |
2025 | desc->lli_current = 0; |
2026 | desc->txd.flags = dma_flags; | |
2027 | desc->txd.tx_submit = d40_tx_submit; | |
2028 | ||
2029 | dma_async_tx_descriptor_init(&desc->txd, &chan->chan); | |
2030 | ||
2031 | return desc; | |
dbd88788 RV |
2032 | |
2033 | err: | |
2034 | d40_desc_free(chan, desc); | |
2035 | return NULL; | |
5f81158f RV |
2036 | } |
2037 | ||
cade1d30 | 2038 | static dma_addr_t |
db8196df | 2039 | d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) |
8d318a50 | 2040 | { |
cade1d30 RV |
2041 | struct stedma40_platform_data *plat = chan->base->plat_data; |
2042 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | |
711b9cea | 2043 | dma_addr_t addr = 0; |
cade1d30 RV |
2044 | |
2045 | if (chan->runtime_addr) | |
2046 | return chan->runtime_addr; | |
2047 | ||
db8196df | 2048 | if (direction == DMA_DEV_TO_MEM) |
cade1d30 | 2049 | addr = plat->dev_rx[cfg->src_dev_type]; |
db8196df | 2050 | else if (direction == DMA_MEM_TO_DEV) |
cade1d30 RV |
2051 | addr = plat->dev_tx[cfg->dst_dev_type]; |
2052 | ||
2053 | return addr; | |
2054 | } | |
2055 | ||
2056 | static struct dma_async_tx_descriptor * | |
2057 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |
2058 | struct scatterlist *sg_dst, unsigned int sg_len, | |
db8196df | 2059 | enum dma_transfer_direction direction, unsigned long dma_flags) |
cade1d30 RV |
2060 | { |
2061 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | |
822c5676 RV |
2062 | dma_addr_t src_dev_addr = 0; |
2063 | dma_addr_t dst_dev_addr = 0; | |
cade1d30 | 2064 | struct d40_desc *desc; |
2a614340 | 2065 | unsigned long flags; |
cade1d30 | 2066 | int ret; |
8d318a50 | 2067 | |
cade1d30 RV |
2068 | if (!chan->phy_chan) { |
2069 | chan_err(chan, "Cannot prepare unallocated channel\n"); | |
2070 | return NULL; | |
0d0f6b8b JA |
2071 | } |
2072 | ||
0c842b55 | 2073 | |
cade1d30 | 2074 | spin_lock_irqsave(&chan->lock, flags); |
8d318a50 | 2075 | |
cade1d30 RV |
2076 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
2077 | if (desc == NULL) | |
8d318a50 LW |
2078 | goto err; |
2079 | ||
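/*
 * A scatterlist whose last entry links back to the first was built
 * by dma40_prep_dma_cyclic(); treat such a list as a cyclic job.
 */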
0c842b55 RV |
2080 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
2081 | desc->cyclic = true; | |
2082 | ||
822c5676 RV |
2083 | if (direction != DMA_NONE) { |
2084 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | |
2085 | ||
db8196df | 2086 | if (direction == DMA_DEV_TO_MEM) |
822c5676 | 2087 | src_dev_addr = dev_addr; |
db8196df | 2088 | else if (direction == DMA_MEM_TO_DEV) |
822c5676 RV |
2089 | dst_dev_addr = dev_addr; |
2090 | } | |
cade1d30 RV |
2091 | |
2092 | if (chan_is_logical(chan)) | |
2093 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, | |
822c5676 | 2094 | sg_len, src_dev_addr, dst_dev_addr); |
cade1d30 RV |
2095 | else |
2096 | ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, | |
822c5676 | 2097 | sg_len, src_dev_addr, dst_dev_addr); |
cade1d30 RV |
2098 | |
2099 | if (ret) { | |
2100 | chan_err(chan, "Failed to prepare %s sg job: %d\n", | |
2101 | chan_is_logical(chan) ? "log" : "phy", ret); | |
2102 | goto err; | |
8d318a50 LW |
2103 | } |
2104 | ||
82babbb3 PF |
2105 | /* |
2106 | * Add the descriptor to the prepare queue so that it can be | 
2107 | * freed later in terminate_all. | 
2108 | */ | |
2109 | list_add_tail(&desc->node, &chan->prepare_queue); | |
2110 | ||
cade1d30 RV |
2111 | spin_unlock_irqrestore(&chan->lock, flags); |
2112 | ||
2113 | return &desc->txd; | |
8d318a50 | 2114 | |
8d318a50 | 2115 | err: |
cade1d30 RV |
2116 | if (desc) |
2117 | d40_desc_free(chan, desc); | |
2118 | spin_unlock_irqrestore(&chan->lock, flags); | |
8d318a50 LW |
2119 | return NULL; |
2120 | } | |
8d318a50 LW |
2121 | |
2122 | bool stedma40_filter(struct dma_chan *chan, void *data) | |
2123 | { | |
2124 | struct stedma40_chan_cfg *info = data; | |
2125 | struct d40_chan *d40c = | |
2126 | container_of(chan, struct d40_chan, chan); | |
2127 | int err; | |
2128 | ||
2129 | if (data) { | |
2130 | err = d40_validate_conf(d40c, info); | |
2131 | if (!err) | |
2132 | d40c->dma_cfg = *info; | |
2133 | } else | |
2134 | err = d40_config_memcpy(d40c); | |
2135 | ||
ce2ca125 RV |
2136 | if (!err) |
2137 | d40c->configured = true; | |
2138 | ||
8d318a50 LW |
2139 | return err == 0; |
2140 | } | |
2141 | EXPORT_SYMBOL(stedma40_filter); | |
2142 | ||
ac2c0a38 RV |
2143 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) |
2144 | { | |
2145 | bool realtime = d40c->dma_cfg.realtime; | |
2146 | bool highprio = d40c->dma_cfg.high_priority; | |
2147 | u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; | |
2148 | u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; | |
2149 | u32 event = D40_TYPE_TO_EVENT(dev_type); | |
2150 | u32 group = D40_TYPE_TO_GROUP(dev_type); | |
2151 | u32 bit = 1 << event; | |
2152 | ||
2153 | /* Destination event lines are stored in the upper halfword */ | |
2154 | if (!src) | |
2155 | bit <<= 16; | |
2156 | ||
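/* Each event group has its own priority/realtime register, 4 bytes apart. */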
2157 | writel(bit, d40c->base->virtbase + prioreg + group * 4); | |
2158 | writel(bit, d40c->base->virtbase + rtreg + group * 4); | |
2159 | } | |
2160 | ||
2161 | static void d40_set_prio_realtime(struct d40_chan *d40c) | |
2162 | { | |
2163 | if (d40c->base->rev < 3) | |
2164 | return; | |
2165 | ||
2166 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | |
2167 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | |
2168 | __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); | |
2169 | ||
2170 | if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || | |
2171 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | |
2172 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); | |
2173 | } | |
2174 | ||
8d318a50 LW |
2175 | /* DMA ENGINE functions */ |
2176 | static int d40_alloc_chan_resources(struct dma_chan *chan) | |
2177 | { | |
2178 | int err; | |
2179 | unsigned long flags; | |
2180 | struct d40_chan *d40c = | |
2181 | container_of(chan, struct d40_chan, chan); | |
ef1872ec | 2182 | bool is_free_phy; |
8d318a50 LW |
2183 | spin_lock_irqsave(&d40c->lock, flags); |
2184 | ||
2185 | d40c->completed = chan->cookie = 1; | |
2186 | ||
ce2ca125 RV |
2187 | /* If no dma configuration is set use default configuration (memcpy) */ |
2188 | if (!d40c->configured) { | |
8d318a50 | 2189 | err = d40_config_memcpy(d40c); |
ff0b12ba | 2190 | if (err) { |
6db5a8ba | 2191 | chan_err(d40c, "Failed to configure memcpy channel\n"); |
ff0b12ba JA |
2192 | goto fail; |
2193 | } | |
8d318a50 LW |
2194 | } |
2195 | ||
5cd326fd | 2196 | err = d40_allocate_channel(d40c, &is_free_phy); |
8d318a50 | 2197 | if (err) { |
6db5a8ba | 2198 | chan_err(d40c, "Failed to allocate channel\n"); |
7fb3e75e | 2199 | d40c->configured = false; |
ff0b12ba | 2200 | goto fail; |
8d318a50 LW |
2201 | } |
2202 | ||
7fb3e75e | 2203 | pm_runtime_get_sync(d40c->base->dev); |
ef1872ec LW |
2204 | /* Fill in basic CFG register values */ |
2205 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | |
724a8577 | 2206 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
ef1872ec | 2207 | |
ac2c0a38 RV |
2208 | d40_set_prio_realtime(d40c); |
2209 | ||
724a8577 | 2210 | if (chan_is_logical(d40c)) { |
ef1872ec LW |
2211 | d40_log_cfg(&d40c->dma_cfg, |
2212 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | |
2213 | ||
2214 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | |
2215 | d40c->lcpa = d40c->base->lcpa_base + | |
2216 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; | |
2217 | else | |
2218 | d40c->lcpa = d40c->base->lcpa_base + | |
2219 | d40c->dma_cfg.dst_dev_type * | |
2220 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | |
2221 | } | |
2222 | ||
5cd326fd N |
2223 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", |
2224 | chan_is_logical(d40c) ? "logical" : "physical", | |
2225 | d40c->phy_chan->num, | |
2226 | d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); | |
2227 | ||
2228 | ||
ef1872ec LW |
2229 | /* |
2230 | * Only write channel configuration to the DMA if the physical | |
2231 | * resource is free. In case of multiple logical channels | |
2232 | * on the same physical resource, only the first write is necessary. | |
2233 | */ | |
b55912c6 JA |
2234 | if (is_free_phy) |
2235 | d40_config_write(d40c); | |
ff0b12ba | 2236 | fail: |
7fb3e75e N |
2237 | pm_runtime_mark_last_busy(d40c->base->dev); |
2238 | pm_runtime_put_autosuspend(d40c->base->dev); | |
8d318a50 | 2239 | spin_unlock_irqrestore(&d40c->lock, flags); |
ff0b12ba | 2240 | return err; |
8d318a50 LW |
2241 | } |
2242 | ||
2243 | static void d40_free_chan_resources(struct dma_chan *chan) | |
2244 | { | |
2245 | struct d40_chan *d40c = | |
2246 | container_of(chan, struct d40_chan, chan); | |
2247 | int err; | |
2248 | unsigned long flags; | |
2249 | ||
0d0f6b8b | 2250 | if (d40c->phy_chan == NULL) { |
6db5a8ba | 2251 | chan_err(d40c, "Cannot free unallocated channel\n"); |
0d0f6b8b JA |
2252 | return; |
2253 | } | |
2254 | ||
2255 | ||
8d318a50 LW |
2256 | spin_lock_irqsave(&d40c->lock, flags); |
2257 | ||
2258 | err = d40_free_dma(d40c); | |
2259 | ||
2260 | if (err) | |
6db5a8ba | 2261 | chan_err(d40c, "Failed to free channel\n"); |
8d318a50 LW |
2262 | spin_unlock_irqrestore(&d40c->lock, flags); |
2263 | } | |
2264 | ||
2265 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |
2266 | dma_addr_t dst, | |
2267 | dma_addr_t src, | |
2268 | size_t size, | |
2a614340 | 2269 | unsigned long dma_flags) |
8d318a50 | 2270 | { |
95944c6e RV |
2271 | struct scatterlist dst_sg; |
2272 | struct scatterlist src_sg; | |
8d318a50 | 2273 | |
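/*
 * Wrap the flat src/dst buffers in single-entry scatterlists so
 * that memcpy can reuse the common d40_prep_sg() path.
 */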
95944c6e RV |
2274 | sg_init_table(&dst_sg, 1); |
2275 | sg_init_table(&src_sg, 1); | |
8d318a50 | 2276 | |
95944c6e RV |
2277 | sg_dma_address(&dst_sg) = dst; |
2278 | sg_dma_address(&src_sg) = src; | |
8d318a50 | 2279 | |
95944c6e RV |
2280 | sg_dma_len(&dst_sg) = size; |
2281 | sg_dma_len(&src_sg) = size; | |
8d318a50 | 2282 | |
cade1d30 | 2283 | return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); |
8d318a50 LW |
2284 | } |
2285 | ||
0d688662 | 2286 | static struct dma_async_tx_descriptor * |
cade1d30 RV |
2287 | d40_prep_memcpy_sg(struct dma_chan *chan, |
2288 | struct scatterlist *dst_sg, unsigned int dst_nents, | |
2289 | struct scatterlist *src_sg, unsigned int src_nents, | |
2290 | unsigned long dma_flags) | |
0d688662 IS |
2291 | { |
2292 | if (dst_nents != src_nents) | |
2293 | return NULL; | |
2294 | ||
cade1d30 | 2295 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); |
00ac0341 RV |
2296 | } |
2297 | ||
8d318a50 LW |
2298 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
2299 | struct scatterlist *sgl, | |
2300 | unsigned int sg_len, | |
db8196df | 2301 | enum dma_transfer_direction direction, |
2a614340 | 2302 | unsigned long dma_flags) |
8d318a50 | 2303 | { |
db8196df | 2304 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) |
00ac0341 RV |
2305 | return NULL; |
2306 | ||
cade1d30 | 2307 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
8d318a50 LW |
2308 | } |
2309 | ||
0c842b55 RV |
2310 | static struct dma_async_tx_descriptor * |
2311 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |
2312 | size_t buf_len, size_t period_len, | |
db8196df | 2313 | enum dma_transfer_direction direction) |
0c842b55 RV |
2314 | { |
2315 | unsigned int periods = buf_len / period_len; | |
2316 | struct dma_async_tx_descriptor *txd; | |
2317 | struct scatterlist *sg; | |
2318 | int i; | |
2319 | ||
79ca7ec3 | 2320 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); | 
if (!sg)
return NULL;
0c842b55 RV |
2321 | for (i = 0; i < periods; i++) { |
2322 | sg_dma_address(&sg[i]) = dma_addr; | |
2323 | sg_dma_len(&sg[i]) = period_len; | |
2324 | dma_addr += period_len; | |
2325 | } | |
2326 | ||
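/*
 * Make the list circular: the extra entry becomes a chain link back
 * to the first entry (bit 0 set marks a chain pointer, bit 1
 * cleared means it is not a terminator).
 */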
2327 | sg[periods].offset = 0; | |
2328 | sg[periods].length = 0; | |
2329 | sg[periods].page_link = | |
2330 | ((unsigned long)sg | 0x01) & ~0x02; | |
2331 | ||
2332 | txd = d40_prep_sg(chan, sg, sg, periods, direction, | |
2333 | DMA_PREP_INTERRUPT); | |
2334 | ||
2335 | kfree(sg); | |
2336 | ||
2337 | return txd; | |
2338 | } | |
2339 | ||
8d318a50 LW |
2340 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
2341 | dma_cookie_t cookie, | |
2342 | struct dma_tx_state *txstate) | |
2343 | { | |
2344 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2345 | dma_cookie_t last_used; | |
2346 | dma_cookie_t last_complete; | |
2347 | int ret; | |
2348 | ||
0d0f6b8b | 2349 | if (d40c->phy_chan == NULL) { |
6db5a8ba | 2350 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
0d0f6b8b JA |
2351 | return -EINVAL; |
2352 | } | |
2353 | ||
8d318a50 LW |
2354 | last_complete = d40c->completed; |
2355 | last_used = chan->cookie; | |
2356 | ||
a5ebca47 JA |
2357 | if (d40_is_paused(d40c)) |
2358 | ret = DMA_PAUSED; | |
2359 | else | |
2360 | ret = dma_async_is_complete(cookie, last_complete, last_used); | |
8d318a50 | 2361 | |
a5ebca47 JA |
2362 | dma_set_tx_state(txstate, last_complete, last_used, |
2363 | stedma40_residue(chan)); | |
8d318a50 LW |
2364 | |
2365 | return ret; | |
2366 | } | |
2367 | ||
2368 | static void d40_issue_pending(struct dma_chan *chan) | |
2369 | { | |
2370 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2371 | unsigned long flags; | |
2372 | ||
0d0f6b8b | 2373 | if (d40c->phy_chan == NULL) { |
6db5a8ba | 2374 | chan_err(d40c, "Channel is not allocated!\n"); |
0d0f6b8b JA |
2375 | return; |
2376 | } | |
2377 | ||
8d318a50 LW |
2378 | spin_lock_irqsave(&d40c->lock, flags); |
2379 | ||
a8f3067b PF |
2380 | list_splice_tail_init(&d40c->pending_queue, &d40c->queue); |
2381 | ||
2382 | /* Busy means that queued jobs are already being processed */ | |
8d318a50 LW |
2383 | if (!d40c->busy) |
2384 | (void) d40_queue_start(d40c); | |
2385 | ||
2386 | spin_unlock_irqrestore(&d40c->lock, flags); | |
2387 | } | |
2388 | ||
98ca5289 RV |
2389 | static int |
2390 | dma40_config_to_halfchannel(struct d40_chan *d40c, | |
2391 | struct stedma40_half_channel_info *info, | |
2392 | enum dma_slave_buswidth width, | |
2393 | u32 maxburst) | |
2394 | { | |
2395 | enum stedma40_periph_data_width addr_width; | |
2396 | int psize; | |
2397 | ||
2398 | switch (width) { | |
2399 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | |
2400 | addr_width = STEDMA40_BYTE_WIDTH; | |
2401 | break; | |
2402 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | |
2403 | addr_width = STEDMA40_HALFWORD_WIDTH; | |
2404 | break; | |
2405 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | |
2406 | addr_width = STEDMA40_WORD_WIDTH; | |
2407 | break; | |
2408 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | |
2409 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | |
2410 | break; | |
2411 | default: | |
2412 | dev_err(d40c->base->dev, | |
2413 | "illegal peripheral address width " | |
2414 | "requested (%d)\n", | |
2415 | width); | |
2416 | return -EINVAL; | |
2417 | } | |
2418 | ||
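/* Round the requested maxburst down to the nearest supported burst size. */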
2419 | if (chan_is_logical(d40c)) { | |
2420 | if (maxburst >= 16) | |
2421 | psize = STEDMA40_PSIZE_LOG_16; | |
2422 | else if (maxburst >= 8) | |
2423 | psize = STEDMA40_PSIZE_LOG_8; | |
2424 | else if (maxburst >= 4) | |
2425 | psize = STEDMA40_PSIZE_LOG_4; | |
2426 | else | |
2427 | psize = STEDMA40_PSIZE_LOG_1; | |
2428 | } else { | |
2429 | if (maxburst >= 16) | |
2430 | psize = STEDMA40_PSIZE_PHY_16; | |
2431 | else if (maxburst >= 8) | |
2432 | psize = STEDMA40_PSIZE_PHY_8; | |
2433 | else if (maxburst >= 4) | |
2434 | psize = STEDMA40_PSIZE_PHY_4; | |
2435 | else | |
2436 | psize = STEDMA40_PSIZE_PHY_1; | |
2437 | } | |
2438 | ||
2439 | info->data_width = addr_width; | |
2440 | info->psize = psize; | |
2441 | info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; | |
2442 | ||
2443 | return 0; | |
2444 | } | |
2445 | ||
95e1400f | 2446 | /* Runtime reconfiguration extension */ |
98ca5289 RV |
2447 | static int d40_set_runtime_config(struct dma_chan *chan, |
2448 | struct dma_slave_config *config) | |
95e1400f LW |
2449 | { |
2450 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2451 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | |
98ca5289 | 2452 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
95e1400f | 2453 | dma_addr_t config_addr; |
98ca5289 RV |
2454 | u32 src_maxburst, dst_maxburst; |
2455 | int ret; | |
2456 | ||
2457 | src_addr_width = config->src_addr_width; | |
2458 | src_maxburst = config->src_maxburst; | |
2459 | dst_addr_width = config->dst_addr_width; | |
2460 | dst_maxburst = config->dst_maxburst; | |
95e1400f | 2461 | |
db8196df | 2462 | if (config->direction == DMA_DEV_TO_MEM) { |
95e1400f LW |
2463 | dma_addr_t dev_addr_rx = |
2464 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; | |
2465 | ||
2466 | config_addr = config->src_addr; | |
2467 | if (dev_addr_rx) | |
2468 | dev_dbg(d40c->base->dev, | |
2469 | "channel has a pre-wired RX address %08x " | |
2470 | "overriding with %08x\n", | |
2471 | dev_addr_rx, config_addr); | |
2472 | if (cfg->dir != STEDMA40_PERIPH_TO_MEM) | |
2473 | dev_dbg(d40c->base->dev, | |
2474 | "channel was not configured for peripheral " | |
2475 | "to memory transfer (%d) overriding\n", | |
2476 | cfg->dir); | |
2477 | cfg->dir = STEDMA40_PERIPH_TO_MEM; | |
2478 | ||
98ca5289 RV |
2479 | /* Configure the memory side */ |
2480 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | |
2481 | dst_addr_width = src_addr_width; | |
2482 | if (dst_maxburst == 0) | |
2483 | dst_maxburst = src_maxburst; | |
95e1400f | 2484 | |
db8196df | 2485 | } else if (config->direction == DMA_MEM_TO_DEV) { |
95e1400f LW |
2486 | dma_addr_t dev_addr_tx = |
2487 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; | |
2488 | ||
2489 | config_addr = config->dst_addr; | |
2490 | if (dev_addr_tx) | |
2491 | dev_dbg(d40c->base->dev, | |
2492 | "channel has a pre-wired TX address %08x " | |
2493 | "overriding with %08x\n", | |
2494 | dev_addr_tx, config_addr); | |
2495 | if (cfg->dir != STEDMA40_MEM_TO_PERIPH) | |
2496 | dev_dbg(d40c->base->dev, | |
2497 | "channel was not configured for memory " | |
2498 | "to peripheral transfer (%d) overriding\n", | |
2499 | cfg->dir); | |
2500 | cfg->dir = STEDMA40_MEM_TO_PERIPH; | |
2501 | ||
98ca5289 RV |
2502 | /* Configure the memory side */ |
2503 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | |
2504 | src_addr_width = dst_addr_width; | |
2505 | if (src_maxburst == 0) | |
2506 | src_maxburst = dst_maxburst; | |
95e1400f LW |
2507 | } else { |
2508 | dev_err(d40c->base->dev, | |
2509 | "unrecognized channel direction %d\n", | |
2510 | config->direction); | |
98ca5289 | 2511 | return -EINVAL; |
95e1400f LW |
2512 | } |
2513 | ||
98ca5289 | 2514 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
95e1400f | 2515 | dev_err(d40c->base->dev, |
98ca5289 RV |
2516 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", |
2517 | src_maxburst, | |
2518 | src_addr_width, | |
2519 | dst_maxburst, | |
2520 | dst_addr_width); | |
2521 | return -EINVAL; | |
95e1400f LW |
2522 | } |
2523 | ||
98ca5289 RV |
2524 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
2525 | src_addr_width, | |
2526 | src_maxburst); | |
2527 | if (ret) | |
2528 | return ret; | |
95e1400f | 2529 | |
98ca5289 RV |
2530 | ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, |
2531 | dst_addr_width, | |
2532 | dst_maxburst); | |
2533 | if (ret) | |
2534 | return ret; | |
95e1400f | 2535 | |
a59670a4 | 2536 | /* Fill in register values */ |
724a8577 | 2537 | if (chan_is_logical(d40c)) |
a59670a4 PF |
2538 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
2539 | else | |
2540 | d40_phy_cfg(cfg, &d40c->src_def_cfg, | |
2541 | &d40c->dst_def_cfg, false); | |
2542 | ||
95e1400f LW |
2543 | /* These settings will take precedence later */ |
2544 | d40c->runtime_addr = config_addr; | |
2545 | d40c->runtime_direction = config->direction; | |
2546 | dev_dbg(d40c->base->dev, | |
98ca5289 RV |
2547 | "configured channel %s for %s, data width %d/%d, " |
2548 | "maxburst %d/%d elements, LE, no flow control\n", | |
95e1400f | 2549 | dma_chan_name(chan), |
db8196df | 2550 | (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", |
98ca5289 RV |
2551 | src_addr_width, dst_addr_width, |
2552 | src_maxburst, dst_maxburst); | |
2553 | ||
2554 | return 0; | |
95e1400f LW |
2555 | } |
2556 | ||
05827630 LW |
2557 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
2558 | unsigned long arg) | |
8d318a50 | 2559 | { |
8d318a50 LW |
2560 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2561 | ||
0d0f6b8b | 2562 | if (d40c->phy_chan == NULL) { |
6db5a8ba | 2563 | chan_err(d40c, "Channel is not allocated!\n"); |
0d0f6b8b JA |
2564 | return -EINVAL; |
2565 | } | |
2566 | ||
8d318a50 LW |
2567 | switch (cmd) { |
2568 | case DMA_TERMINATE_ALL: | |
86eb5fb6 | 2569 | return d40_terminate_all(d40c); |
8d318a50 | 2570 | case DMA_PAUSE: |
86eb5fb6 | 2571 | return d40_pause(d40c); |
8d318a50 | 2572 | case DMA_RESUME: |
86eb5fb6 | 2573 | return d40_resume(d40c); |
95e1400f | 2574 | case DMA_SLAVE_CONFIG: |
98ca5289 | 2575 | return d40_set_runtime_config(chan, |
95e1400f | 2576 | (struct dma_slave_config *) arg); |
95e1400f LW |
2577 | default: |
2578 | break; | |
8d318a50 LW |
2579 | } |
2580 | ||
2581 | /* Other commands are unimplemented */ | |
2582 | return -ENXIO; | |
2583 | } | |
2584 | ||
2585 | /* Initialization functions */ | |
2586 | ||
2587 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |
2588 | struct d40_chan *chans, int offset, | |
2589 | int num_chans) | |
2590 | { | |
2591 | int i = 0; | |
2592 | struct d40_chan *d40c; | |
2593 | ||
2594 | INIT_LIST_HEAD(&dma->channels); | |
2595 | ||
2596 | for (i = offset; i < offset + num_chans; i++) { | |
2597 | d40c = &chans[i]; | |
2598 | d40c->base = base; | |
2599 | d40c->chan.device = dma; | |
2600 | ||
8d318a50 LW |
2601 | spin_lock_init(&d40c->lock); |
2602 | ||
2603 | d40c->log_num = D40_PHY_CHAN; | |
2604 | ||
8d318a50 LW |
2605 | INIT_LIST_HEAD(&d40c->active); |
2606 | INIT_LIST_HEAD(&d40c->queue); | |
a8f3067b | 2607 | INIT_LIST_HEAD(&d40c->pending_queue); |
8d318a50 | 2608 | INIT_LIST_HEAD(&d40c->client); |
82babbb3 | 2609 | INIT_LIST_HEAD(&d40c->prepare_queue); |
8d318a50 | 2610 | |
8d318a50 LW |
2611 | tasklet_init(&d40c->tasklet, dma_tasklet, |
2612 | (unsigned long) d40c); | |
2613 | ||
2614 | list_add_tail(&d40c->chan.device_node, | |
2615 | &dma->channels); | |
2616 | } | |
2617 | } | |
2618 | ||
7ad74a7c RV |
2619 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) |
2620 | { | |
2621 | if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) | |
2622 | dev->device_prep_slave_sg = d40_prep_slave_sg; | |
2623 | ||
2624 | if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { | |
2625 | dev->device_prep_dma_memcpy = d40_prep_memcpy; | |
2626 | ||
2627 | /* | |
2628 | * This controller can only access addresses at even | 
2629 | * 32-bit boundaries, i.e. aligned to 2^2 bytes. | 
2630 | */ | |
2631 | dev->copy_align = 2; | |
2632 | } | |
2633 | ||
2634 | if (dma_has_cap(DMA_SG, dev->cap_mask)) | |
2635 | dev->device_prep_dma_sg = d40_prep_memcpy_sg; | |
2636 | ||
0c842b55 RV |
2637 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) |
2638 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; | |
2639 | ||
7ad74a7c RV |
2640 | dev->device_alloc_chan_resources = d40_alloc_chan_resources; |
2641 | dev->device_free_chan_resources = d40_free_chan_resources; | |
2642 | dev->device_issue_pending = d40_issue_pending; | |
2643 | dev->device_tx_status = d40_tx_status; | |
2644 | dev->device_control = d40_control; | |
2645 | dev->dev = base->dev; | |
2646 | } | |
2647 | ||
8d318a50 LW |
2648 | static int __init d40_dmaengine_init(struct d40_base *base, |
2649 | int num_reserved_chans) | |
2650 | { | |
2651 | int err; | 
2652 | ||
2653 | d40_chan_init(base, &base->dma_slave, base->log_chans, | |
2654 | 0, base->num_log_chans); | |
2655 | ||
2656 | dma_cap_zero(base->dma_slave.cap_mask); | |
2657 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | |
0c842b55 | 2658 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); |
8d318a50 | 2659 | |
7ad74a7c | 2660 | d40_ops_init(base, &base->dma_slave); |
8d318a50 LW |
2661 | |
2662 | err = dma_async_device_register(&base->dma_slave); | |
2663 | ||
2664 | if (err) { | |
6db5a8ba | 2665 | d40_err(base->dev, "Failed to register slave channels\n"); |
8d318a50 LW |
2666 | goto failure1; |
2667 | } | |
2668 | ||
2669 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, | |
2670 | base->num_log_chans, base->plat_data->memcpy_len); | |
2671 | ||
2672 | dma_cap_zero(base->dma_memcpy.cap_mask); | |
2673 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | |
7ad74a7c RV |
2674 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); |
2675 | ||
2676 | d40_ops_init(base, &base->dma_memcpy); | |
8d318a50 LW |
2677 | |
2678 | err = dma_async_device_register(&base->dma_memcpy); | |
2679 | ||
2680 | if (err) { | |
6db5a8ba RV |
2681 | d40_err(base->dev, |
2682 | "Failed to regsiter memcpy only channels\n"); | |
8d318a50 LW |
2683 | goto failure2; |
2684 | } | |
2685 | ||
2686 | d40_chan_init(base, &base->dma_both, base->phy_chans, | |
2687 | 0, num_reserved_chans); | |
2688 | ||
2689 | dma_cap_zero(base->dma_both.cap_mask); | |
2690 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | |
2691 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | |
7ad74a7c | 2692 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); |
0c842b55 | 2693 | dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask); | 
7ad74a7c RV |
2694 | |
2695 | d40_ops_init(base, &base->dma_both); | |
8d318a50 LW |
2696 | err = dma_async_device_register(&base->dma_both); |
2697 | ||
2698 | if (err) { | |
6db5a8ba RV |
2699 | d40_err(base->dev, |
2700 | "Failed to register logical and physical capable channels\n"); | |
8d318a50 LW |
2701 | goto failure3; |
2702 | } | |
2703 | return 0; | |
2704 | failure3: | |
2705 | dma_async_device_unregister(&base->dma_memcpy); | |
2706 | failure2: | |
2707 | dma_async_device_unregister(&base->dma_slave); | |
2708 | failure1: | |
2709 | return err; | |
2710 | } | |
2711 | ||
7fb3e75e N |
2712 | /* Suspend resume functionality */ |
2713 | #ifdef CONFIG_PM | |
2714 | static int dma40_pm_suspend(struct device *dev) | |
2715 | { | |
28c7a19d N |
2716 | struct platform_device *pdev = to_platform_device(dev); |
2717 | struct d40_base *base = platform_get_drvdata(pdev); | |
2718 | int ret = 0; | |
7fb3e75e N |
2719 | if (!pm_runtime_suspended(dev)) |
2720 | return -EBUSY; | |
2721 | ||
28c7a19d N |
2722 | if (base->lcpa_regulator) |
2723 | ret = regulator_disable(base->lcpa_regulator); | |
2724 | return ret; | |
7fb3e75e N |
2725 | } |
2726 | ||
2727 | static int dma40_runtime_suspend(struct device *dev) | |
2728 | { | |
2729 | struct platform_device *pdev = to_platform_device(dev); | |
2730 | struct d40_base *base = platform_get_drvdata(pdev); | |
2731 | ||
2732 | d40_save_restore_registers(base, true); | |
2733 | ||
2734 | /* Don't disable/enable clocks for v1 due to HW bugs */ | |
2735 | if (base->rev != 1) | |
2736 | writel_relaxed(base->gcc_pwr_off_mask, | |
2737 | base->virtbase + D40_DREG_GCC); | |
2738 | ||
2739 | return 0; | |
2740 | } | |
2741 | ||
2742 | static int dma40_runtime_resume(struct device *dev) | |
2743 | { | |
2744 | struct platform_device *pdev = to_platform_device(dev); | |
2745 | struct d40_base *base = platform_get_drvdata(pdev); | |
2746 | ||
2747 | if (base->initialized) | |
2748 | d40_save_restore_registers(base, false); | |
2749 | ||
2750 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, | |
2751 | base->virtbase + D40_DREG_GCC); | |
2752 | return 0; | |
2753 | } | |
2754 | ||
28c7a19d N |
2755 | static int dma40_resume(struct device *dev) |
2756 | { | |
2757 | struct platform_device *pdev = to_platform_device(dev); | |
2758 | struct d40_base *base = platform_get_drvdata(pdev); | |
2759 | int ret = 0; | |
2760 | ||
2761 | if (base->lcpa_regulator) | |
2762 | ret = regulator_enable(base->lcpa_regulator); | |
2763 | ||
2764 | return ret; | |
2765 | } | |
7fb3e75e N |
2766 | |
2767 | static const struct dev_pm_ops dma40_pm_ops = { | |
2768 | .suspend = dma40_pm_suspend, | |
2769 | .runtime_suspend = dma40_runtime_suspend, | |
2770 | .runtime_resume = dma40_runtime_resume, | |
28c7a19d | 2771 | .resume = dma40_resume, |
7fb3e75e N |
2772 | }; |
2773 | #define DMA40_PM_OPS (&dma40_pm_ops) | |
2774 | #else | |
2775 | #define DMA40_PM_OPS NULL | |
2776 | #endif | |
2777 | ||
8d318a50 LW |
2778 | /* Initialization functions. */ |
2779 | ||
2780 | static int __init d40_phy_res_init(struct d40_base *base) | |
2781 | { | |
2782 | int i; | |
2783 | int num_phy_chans_avail = 0; | |
2784 | u32 val[2]; | |
2785 | int odd_even_bit = -2; | |
7fb3e75e | 2786 | int gcc = D40_DREG_GCC_ENA; |
8d318a50 LW |
2787 | |
2788 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | |
2789 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | |
2790 | ||
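/*
 * The security mode registers hold a 2-bit field per channel: even
 * channels in PRSME, odd channels in PRSMO; the value 1 marks a
 * channel reserved for secure use.
 */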
2791 | for (i = 0; i < base->num_phy_chans; i++) { | |
2792 | base->phy_res[i].num = i; | |
2793 | odd_even_bit += 2 * ((i % 2) == 0); | |
2794 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { | |
2795 | /* Mark security only channels as occupied */ | |
2796 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | |
2797 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | |
7fb3e75e N |
2798 | base->phy_res[i].reserved = true; |
2799 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | |
2800 | D40_DREG_GCC_SRC); | |
2801 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | |
2802 | D40_DREG_GCC_DST); | |
2803 | ||
2804 | ||
8d318a50 LW |
2805 | } else { |
2806 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | |
2807 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | |
7fb3e75e | 2808 | base->phy_res[i].reserved = false; |
8d318a50 LW |
2809 | num_phy_chans_avail++; |
2810 | } | |
2811 | spin_lock_init(&base->phy_res[i].lock); | |
2812 | } | |
6b7acd84 JA |
2813 | |
2814 | /* Mark disabled channels as occupied */ | |
2815 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { | |
f57b407c RV |
2816 | int chan = base->plat_data->disabled_channels[i]; |
2817 | ||
2818 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; | |
2819 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | |
7fb3e75e N |
2820 | base->phy_res[chan].reserved = true; |
2821 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | |
2822 | D40_DREG_GCC_SRC); | |
2823 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | |
2824 | D40_DREG_GCC_DST); | |
f57b407c | 2825 | num_phy_chans_avail--; |
6b7acd84 JA |
2826 | } |
2827 | ||
8d318a50 LW |
2828 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
2829 | num_phy_chans_avail, base->num_phy_chans); | |
2830 | ||
2831 | /* Verify settings extended vs standard */ | |
2832 | val[0] = readl(base->virtbase + D40_DREG_PRTYP); | |
2833 | ||
2834 | for (i = 0; i < base->num_phy_chans; i++) { | |
2835 | ||
2836 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && | |
2837 | (val[0] & 0x3) != 1) | |
2838 | dev_info(base->dev, | |
2839 | "[%s] INFO: channel %d is misconfigured (%d)\n", | |
2840 | __func__, i, val[0] & 0x3); | |
2841 | ||
2842 | val[0] = val[0] >> 2; | |
2843 | } | |
2844 | ||
7fb3e75e N |
2845 | /* |
2846 | * To keep things simple, enable all clocks initially. | 
2847 | * The clocks will be managed later, after channel allocation. | 
2848 | * The clocks for the event lines on which reserved channels exist | 
2849 | * are not managed here. | 
2850 | */ | |
2851 | writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); | |
2852 | base->gcc_pwr_off_mask = gcc; | |
2853 | ||
8d318a50 LW |
2854 | return num_phy_chans_avail; |
2855 | } | |
2856 | ||
2857 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |
2858 | { | |
8d318a50 LW |
2859 | struct stedma40_platform_data *plat_data; |
2860 | struct clk *clk = NULL; | |
2861 | void __iomem *virtbase = NULL; | |
2862 | struct resource *res = NULL; | |
2863 | struct d40_base *base = NULL; | |
2864 | int num_log_chans = 0; | |
2865 | int num_phy_chans; | |
2866 | int i; | |
f4b89764 LW |
2867 | u32 pid; |
2868 | u32 cid; | |
2869 | u8 rev; | |
8d318a50 LW |
2870 | |
2871 | clk = clk_get(&pdev->dev, NULL); | |
2872 | ||
2873 | if (IS_ERR(clk)) { | |
6db5a8ba | 2874 | d40_err(&pdev->dev, "No matching clock found\n"); |
8d318a50 LW |
2875 | goto failure; |
2876 | } | |
2877 | ||
2878 | clk_enable(clk); | |
2879 | ||
2880 | /* Get IO for DMAC base address */ | |
2881 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | |
2882 | if (!res) | |
2883 | goto failure; | |
2884 | ||
2885 | if (request_mem_region(res->start, resource_size(res), | |
2886 | D40_NAME " I/O base") == NULL) | |
2887 | goto failure; | |
2888 | ||
2889 | virtbase = ioremap(res->start, resource_size(res)); | |
2890 | if (!virtbase) | |
2891 | goto failure; | |
2892 | ||
f4b89764 LW |
2893 | /* This is just a regular AMBA PrimeCell ID actually */ |
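/*
 * The four peripheral ID bytes sit at the end of the I/O region,
 * one byte per 32-bit word starting at offset -0x20; the four cell
 * ID bytes follow at offset -0x10.
 */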
2894 | for (pid = 0, i = 0; i < 4; i++) | |
2895 | pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) | |
2896 | & 255) << (i * 8); | |
2897 | for (cid = 0, i = 0; i < 4; i++) | |
2898 | cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) | |
2899 | & 255) << (i * 8); | |
8d318a50 | 2900 | |
f4b89764 LW |
2901 | if (cid != AMBA_CID) { |
2902 | d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); | |
2903 | goto failure; | |
2904 | } | |
2905 | if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { | |
6db5a8ba | 2906 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
f4b89764 LW |
2907 | AMBA_MANF_BITS(pid), |
2908 | AMBA_VENDOR_ST); | |
8d318a50 LW |
2909 | goto failure; |
2910 | } | |
f4b89764 LW |
2911 | /* |
2912 | * HW revision: | |
2913 | * DB8500ed has revision 0 | |
2914 | * ? has revision 1 | |
2915 | * DB8500v1 has revision 2 | |
2916 | * DB8500v2 has revision 3 | |
2917 | */ | |
2918 | rev = AMBA_REV_BITS(pid); | |
3ae0267f | 2919 | |
8d318a50 LW |
2920 | /* The number of physical channels on this HW */ |
2921 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | |
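/* ICFG[2:0] encodes the channel count in steps of four: 0 => 4 channels, ..., 7 => 32. */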
2922 | ||
2923 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | |
3ae0267f | 2924 | rev, res->start); |
8d318a50 LW |
2925 | |
2926 | plat_data = pdev->dev.platform_data; | |
2927 | ||
2928 | /* Count the number of logical channels in use */ | |
2929 | for (i = 0; i < plat_data->dev_len; i++) | |
2930 | if (plat_data->dev_rx[i] != 0) | |
2931 | num_log_chans++; | |
2932 | ||
2933 | for (i = 0; i < plat_data->dev_len; i++) | |
2934 | if (plat_data->dev_tx[i] != 0) | |
2935 | num_log_chans++; | |
2936 | ||
2937 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + | |
2938 | (num_phy_chans + num_log_chans + plat_data->memcpy_len) * | |
2939 | sizeof(struct d40_chan), GFP_KERNEL); | |
2940 | ||
2941 | if (base == NULL) { | |
6db5a8ba | 2942 | d40_err(&pdev->dev, "Out of memory\n"); |
8d318a50 LW |
2943 | goto failure; |
2944 | } | |
2945 | ||
3ae0267f | 2946 | base->rev = rev; |
8d318a50 LW |
2947 | base->clk = clk; |
2948 | base->num_phy_chans = num_phy_chans; | |
2949 | base->num_log_chans = num_log_chans; | |
2950 | base->phy_start = res->start; | |
2951 | base->phy_size = resource_size(res); | |
2952 | base->virtbase = virtbase; | |
2953 | base->plat_data = plat_data; | |
2954 | base->dev = &pdev->dev; | |
2955 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | |
2956 | base->log_chans = &base->phy_chans[num_phy_chans]; | |
2957 | ||
2958 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), | |
2959 | GFP_KERNEL); | |
2960 | if (!base->phy_res) | |
2961 | goto failure; | |
2962 | ||
2963 | base->lookup_phy_chans = kzalloc(num_phy_chans * | |
2964 | sizeof(struct d40_chan *), | |
2965 | GFP_KERNEL); | |
2966 | if (!base->lookup_phy_chans) | |
2967 | goto failure; | |
2968 | ||
2969 | if (num_log_chans + plat_data->memcpy_len) { | |
2970 | /* | |
2971 | * The maximum number of logical channels equals the number of | 
2972 | * event lines for all src and dst devices. | 
2973 | */ | |
2974 | base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * | |
2975 | sizeof(struct d40_chan *), | |
2976 | GFP_KERNEL); | |
2977 | if (!base->lookup_log_chans) | |
2978 | goto failure; | |
2979 | } | |
698e4732 | 2980 | |
7fb3e75e N |
2981 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * |
2982 | sizeof(d40_backup_regs_chan), | |
8d318a50 | 2983 | GFP_KERNEL); |
7fb3e75e N |
2984 | if (!base->reg_val_backup_chan) |
2985 | goto failure; | |
2986 | ||
2987 | base->lcla_pool.alloc_map = | |
2988 | kzalloc(num_phy_chans * sizeof(struct d40_desc *) | |
2989 | * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); | |
8d318a50 LW |
2990 | if (!base->lcla_pool.alloc_map) |
2991 | goto failure; | |
2992 | ||
c675b1b4 JA |
2993 | base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), |
2994 | 0, SLAB_HWCACHE_ALIGN, | |
2995 | NULL); | |
2996 | if (base->desc_slab == NULL) | |
2997 | goto failure; | |
2998 | ||
8d318a50 LW |
2999 | return base; |
3000 | ||
3001 | failure: | |
c6134c96 | 3002 | if (!IS_ERR(clk)) { |
8d318a50 LW |
3003 | clk_disable(clk); |
3004 | clk_put(clk); | |
3005 | } | |
3006 | if (virtbase) | |
3007 | iounmap(virtbase); | |
3008 | if (res) | |
3009 | release_mem_region(res->start, | |
3010 | resource_size(res)); | |
3013 | ||
3014 | if (base) { | |
3015 | kfree(base->lcla_pool.alloc_map); | |
3016 | kfree(base->lookup_log_chans); | |
3017 | kfree(base->lookup_phy_chans); | |
3018 | kfree(base->phy_res); | |
3019 | kfree(base); | |
3020 | } | |
3021 | ||
3022 | return NULL; | |
3023 | } | |
3024 | ||
3025 | static void __init d40_hw_init(struct d40_base *base) | |
3026 | { | |
3027 | ||
7fb3e75e | 3028 | static struct d40_reg_val dma_init_reg[] = { |
8d318a50 | 3029 | /* Clock every part of the DMA block from start */ |
7fb3e75e | 3030 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, |
8d318a50 LW |
3031 | |
3032 | /* Interrupts on all logical channels */ | |
3033 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | |
3034 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | |
3035 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | |
3036 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | |
3037 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | |
3038 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | |
3039 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | |
3040 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | |
3041 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | |
3042 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | |
3043 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | |
3044 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | |
3045 | }; | |
3046 | int i; | |
3047 | u32 prmseo[2] = {0, 0}; | |
3048 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | |
3049 | u32 pcmis = 0; | |
3050 | u32 pcicr = 0; | |
3051 | ||
3052 | for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) | |
3053 | writel(dma_init_reg[i].val, | |
3054 | base->virtbase + dma_init_reg[i].reg); | |
3055 | ||
3056 | /* Configure all our dma channels to default settings */ | |
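/*
 * Each channel occupies a 2-bit field split between the even and
 * odd registers; the fields are shifted in two bits at a time,
 * walking from the highest-numbered channel down. Channels claimed
 * at init (secure or disabled) stay marked active and are skipped.
 */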
3057 | for (i = 0; i < base->num_phy_chans; i++) { | |
3058 | ||
3059 | activeo[i % 2] = activeo[i % 2] << 2; | |
3060 | ||
3061 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src | |
3062 | == D40_ALLOC_PHY) { | |
3063 | activeo[i % 2] |= 3; | |
3064 | continue; | |
3065 | } | |
3066 | ||
3067 | /* Enable interrupt # */ | |
3068 | pcmis = (pcmis << 1) | 1; | |
3069 | ||
3070 | /* Clear interrupt # */ | |
3071 | pcicr = (pcicr << 1) | 1; | |
3072 | ||
3073 | /* Set channel to physical mode */ | |
3074 | prmseo[i % 2] = prmseo[i % 2] << 2; | |
3075 | prmseo[i % 2] |= 1; | |
3076 | ||
3077 | } | |

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupts to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupts to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}

static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18-bit
	 * aligned. To fulfil this hardware requirement without wasting
	 * 256 KB, we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

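	/*
	 * Remember every attempt in page_list so that all pages failing
	 * the alignment check can be freed once we are done.
	 */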
	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success finding the correct
		 * alignment, fall back to allocating a big buffer and
		 * aligning within it.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

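	/*
	 * Map the pool for the device: the controller reads its link
	 * lists from this memory, hence DMA_TO_DEVICE.
	 */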
	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}

static int __init d40_probe(struct platform_device *pdev)
{
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/*
	 * The LCPA is placed in ESRAM. If the boot code has already
	 * programmed a different LCPA base address, warn about the
	 * mismatch; otherwise program our address.
	 */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	/* If LCLA has to be located in ESRAM, we don't need to allocate it */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

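	/* A single interrupt line serves all channels. */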
	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		ret = base->irq;
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to request IRQ\n");
		goto failure;
	}

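	/*
	 * Runtime PM: the callbacks are IRQ-safe so the controller can
	 * be powered up from atomic context, and it autosuspends after
	 * DMA40_AUTOSUSPEND_DELAY ms of inactivity.
	 */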
	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

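	/*
	 * When LCLA lives in ESRAM, hold a regulator reference so the
	 * ESRAM bank backing it stays powered.
	 */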
	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	base->initialized = true;
	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
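	/* Tear down whatever was initialized before the failure. */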
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
	},
};

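/*
 * Register at subsys_initcall time so the DMA engine is available
 * before ordinary device drivers start probing.
 */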
static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);