/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"
/* DMA channel registers */
#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18	/* USB-DMAC only */
#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)
/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);

	return __raw_readw(addr);
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}
static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
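
/*
 * Illustrative sketch, not part of the driver: how the two helpers above
 * round-trip a transfer size through the split TS bit-field. Assuming a
 * hypothetical pdata with ts_low_mask = 0x18, ts_low_shift = 3, no high
 * half, and a ts_shift[] table of {0, 1, 2, 3} (1-, 2-, 4-, 8-byte units):
 *
 *	u32 ts = log2size_to_chcr(sh_chan, 2);	// index 2 -> CHCR bits 0x10
 *	int sh = calc_xmit_shift(sh_chan, ts);	// recovers 2 -> 4-byte units
 *
 * Real controllers may also use ts_high_mask/ts_high_shift for the upper
 * part of the index; both helpers treat the two halves symmetrically.
 */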
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}
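
/*
 * A sketch of what the default CHCR above encodes, assuming the common
 * CHCR bit definitions from <linux/sh_dma.h> (DM_INC and SM_INC make the
 * destination and source addresses increment; per the driver's own
 * comment, 0x400 is the auto-request setting in the request-select field):
 *
 *	CHCR = DM_INC | SM_INC | 0x400 | <TS bits for 2^2 = 4-byte units>
 *
 * i.e. a mem-to-mem copy where both addresses increment and the transfer
 * starts as soon as CHCR_DE is set, with no external request line.
 */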
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
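
/*
 * Worked example for the read-modify-write above (a sketch, not driver
 * code): each 16-bit DMARS word holds the MID/RID settings of two
 * channels, one per byte lane, selected by dmars_bit (0 or 8):
 *
 *	shift == 0: keep = old & 0xff00, write keep | val        (low byte)
 *	shift == 8: keep = old & 0x00ff, write keep | val << 8   (high byte)
 *
 * (0xff00 >> shift) is exactly the mask of the *other* channel's byte.
 */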
static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}
static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}
static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}
/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}
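
/*
 * For reference, a hypothetical platform-data entry this lookup would
 * match; slave_id is compared in the non-DT case, mid_rid in the DT case.
 * The field names come from struct sh_dmae_slave_config in
 * <linux/sh_dma.h>, but every value below is invented for illustration:
 *
 *	static const struct sh_dmae_slave_config example_slave = {
 *		.slave_id	= 42,
 *		.addr		= 0xfe4c0010,
 *		.chcr		= DM_FIX | SM_INC | <request-select and TS bits>,
 *		.mid_rid	= 0x21,
 *	};
 */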
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}
static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}
static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}
static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
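
/*
 * Numeric sketch of the residue math above (values invented): if the
 * descriptor was set up with hw.tcr = 4096 bytes and xmit_shift = 2
 * (4-byte units), and the down-counting TCR register now reads 512
 * units, then 512 << 2 = 2048 bytes are still outstanding, so the
 * helper returns 4096 - 2048 = 2048 bytes already transferred.
 */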
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect, which channel caused the error, have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
#endif
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}
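
/*
 * Why comparing addresses works (a sketch with invented numbers): the
 * controller advances SAR/DAR as it transfers, so a descriptor programmed
 * with hw.sar = 0x48000000 and hw.tcr = 0x1000 is complete once SAR has
 * reached 0x48001000. For DMA_DEV_TO_MEM the source is a fixed device
 * FIFO, so the incrementing destination (DAR) is checked instead.
 */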
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}
static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}
static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};
static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}
static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif
static const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};
static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}
static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};
static const struct of_device_id sh_dmae_of_match[] = {
	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
	unsigned long irqflags = 0;
	int errirq;
#endif
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;
	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
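	/*
	 * Two illustrative layouts for the rules above (IRQ numbers are
	 * invented): a controller with one muxed IRQ for everything passes a
	 * single resource { .start = 34, .end = 34 }, and every channel then
	 * shares it with IRQF_SHARED. A controller with a separate error IRQ
	 * and a bank of per-channel IRQs could pass { .start = 34, .end = 34 }
	 * followed by a ranged resource { .start = 48, .end = 53 } covering
	 * six channels, per rule 2.
	 */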
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}
= &shdev
->shdma_dev
.dma_dev
;
743 shdev
->chan_reg
= devm_ioremap_resource(&pdev
->dev
, chan
);
744 if (IS_ERR(shdev
->chan_reg
))
745 return PTR_ERR(shdev
->chan_reg
);
747 shdev
->dmars
= devm_ioremap_resource(&pdev
->dev
, dmars
);
748 if (IS_ERR(shdev
->dmars
))
749 return PTR_ERR(shdev
->dmars
);
	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;
	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;
	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}
	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}
static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();

	return 0;
}
static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};
static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);
static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);
MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);