/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2005 Andriy Skulysh
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/dreamcast/dma.h>
#include <asm/dma.h>
#include <asm/io.h>
#include "dma-sh.h"
/*
 * DMTE (transfer-end) IRQ numbers, indexed by DMA channel number.
 * Channels 0-3 exist on every supported CPU; channels 4-5 and 6-7 are
 * only wired up on the larger on-chip DMACs, so their entries are
 * compiled in per CPU subtype.
 */
static int dmte_irq_map[] = {
	DMTE0_IRQ,
	DMTE1_IRQ,
	DMTE2_IRQ,
	DMTE3_IRQ,
#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
    defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
    defined(CONFIG_CPU_SUBTYPE_SH7760) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780)
	DMTE4_IRQ,
	DMTE5_IRQ,
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
    defined(CONFIG_CPU_SUBTYPE_SH7760) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780)
	DMTE6_IRQ,
	DMTE7_IRQ,
#endif
};
1da177e4 | 41 | |
bd71ab88 JL |
42 | static inline unsigned int get_dmte_irq(unsigned int chan) |
43 | { | |
44 | unsigned int irq = 0; | |
9f8a5e3a ML |
45 | if (chan < ARRAY_SIZE(dmte_irq_map)) |
46 | irq = dmte_irq_map[chan]; | |
1da177e4 LT |
47 | return irq; |
48 | } | |
49 | ||
50 | /* | |
51 | * We determine the correct shift size based off of the CHCR transmit size | |
52 | * for the given channel. Since we know that it will take: | |
53 | * | |
54 | * info->count >> ts_shift[transmit_size] | |
55 | * | |
56 | * iterations to complete the transfer. | |
57 | */ | |
58 | static inline unsigned int calc_xmit_shift(struct dma_channel *chan) | |
59 | { | |
60 | u32 chcr = ctrl_inl(CHCR[chan->chan]); | |
61 | ||
0d831770 | 62 | return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT]; |
1da177e4 LT |
63 | } |
64 | ||
65 | /* | |
66 | * The transfer end interrupt must read the chcr register to end the | |
67 | * hardware interrupt active condition. | |
68 | * Besides that it needs to waken any waiting process, which should handle | |
69 | * setting up the next transfer. | |
70 | */ | |
35f3c518 | 71 | static irqreturn_t dma_tei(int irq, void *dev_id) |
1da177e4 | 72 | { |
35f3c518 | 73 | struct dma_channel *chan = dev_id; |
1da177e4 LT |
74 | u32 chcr; |
75 | ||
76 | chcr = ctrl_inl(CHCR[chan->chan]); | |
77 | ||
78 | if (!(chcr & CHCR_TE)) | |
79 | return IRQ_NONE; | |
80 | ||
81 | chcr &= ~(CHCR_IE | CHCR_DE); | |
82 | ctrl_outl(chcr, CHCR[chan->chan]); | |
83 | ||
84 | wake_up(&chan->wait_queue); | |
85 | ||
86 | return IRQ_HANDLED; | |
87 | } | |
88 | ||
89 | static int sh_dmac_request_dma(struct dma_channel *chan) | |
90 | { | |
9e3043c0 PM |
91 | if (unlikely(!chan->flags & DMA_TEI_CAPABLE)) |
92 | return 0; | |
93 | ||
1da177e4 | 94 | return request_irq(get_dmte_irq(chan->chan), dma_tei, |
e803aaf6 | 95 | IRQF_DISABLED, chan->dev_id, chan); |
1da177e4 LT |
96 | } |
97 | ||
98 | static void sh_dmac_free_dma(struct dma_channel *chan) | |
99 | { | |
100 | free_irq(get_dmte_irq(chan->chan), chan); | |
101 | } | |
102 | ||
9f8a5e3a | 103 | static int |
0d831770 | 104 | sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) |
1da177e4 LT |
105 | { |
106 | if (!chcr) | |
0d831770 PM |
107 | chcr = RS_DUAL | CHCR_IE; |
108 | ||
109 | if (chcr & CHCR_IE) { | |
110 | chcr &= ~CHCR_IE; | |
111 | chan->flags |= DMA_TEI_CAPABLE; | |
112 | } else { | |
113 | chan->flags &= ~DMA_TEI_CAPABLE; | |
114 | } | |
1da177e4 LT |
115 | |
116 | ctrl_outl(chcr, CHCR[chan->chan]); | |
117 | ||
118 | chan->flags |= DMA_CONFIGURED; | |
9f8a5e3a | 119 | return 0; |
1da177e4 LT |
120 | } |
121 | ||
122 | static void sh_dmac_enable_dma(struct dma_channel *chan) | |
123 | { | |
0d831770 | 124 | int irq; |
1da177e4 LT |
125 | u32 chcr; |
126 | ||
127 | chcr = ctrl_inl(CHCR[chan->chan]); | |
0d831770 PM |
128 | chcr |= CHCR_DE; |
129 | ||
130 | if (chan->flags & DMA_TEI_CAPABLE) | |
131 | chcr |= CHCR_IE; | |
132 | ||
1da177e4 LT |
133 | ctrl_outl(chcr, CHCR[chan->chan]); |
134 | ||
0d831770 PM |
135 | if (chan->flags & DMA_TEI_CAPABLE) { |
136 | irq = get_dmte_irq(chan->chan); | |
137 | enable_irq(irq); | |
138 | } | |
1da177e4 LT |
139 | } |
140 | ||
141 | static void sh_dmac_disable_dma(struct dma_channel *chan) | |
142 | { | |
0d831770 | 143 | int irq; |
1da177e4 LT |
144 | u32 chcr; |
145 | ||
0d831770 PM |
146 | if (chan->flags & DMA_TEI_CAPABLE) { |
147 | irq = get_dmte_irq(chan->chan); | |
148 | disable_irq(irq); | |
149 | } | |
1da177e4 LT |
150 | |
151 | chcr = ctrl_inl(CHCR[chan->chan]); | |
152 | chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); | |
153 | ctrl_outl(chcr, CHCR[chan->chan]); | |
154 | } | |
155 | ||
/*
 * Program SAR/DAR/DMATCR for the channel and kick off the transfer.
 * The channel is disabled before the address registers are touched and
 * re-enabled afterwards.  Always returns 0.
 */
static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to SAR/DAR
	 * (this includes 0) that hasn't been directly specified by the user if
	 * we're in single-address mode.
	 *
	 * In this case, only one address can be defined, anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as this is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->sar, SAR[chan->chan]);
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->dar, DAR[chan->chan]);

	/* DMATCR counts transfer units, not bytes. */
	ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]);

	sh_dmac_enable_dma(chan);

	return 0;
}
195 | ||
196 | static int sh_dmac_get_dma_residue(struct dma_channel *chan) | |
197 | { | |
198 | if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE)) | |
199 | return 0; | |
200 | ||
201 | return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan); | |
202 | } | |
203 | ||
3ea6bc3d MB |
204 | #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
205 | defined(CONFIG_CPU_SUBTYPE_SH7780) | |
0d831770 PM |
206 | #define dmaor_read_reg() ctrl_inw(DMAOR) |
207 | #define dmaor_write_reg(data) ctrl_outw(data, DMAOR) | |
208 | #else | |
209 | #define dmaor_read_reg() ctrl_inl(DMAOR) | |
210 | #define dmaor_write_reg(data) ctrl_outl(data, DMAOR) | |
211 | #endif | |
212 | ||
213 | static inline int dmaor_reset(void) | |
1da177e4 | 214 | { |
0d831770 PM |
215 | unsigned long dmaor = dmaor_read_reg(); |
216 | ||
217 | /* Try to clear the error flags first, incase they are set */ | |
218 | dmaor &= ~(DMAOR_NMIF | DMAOR_AE); | |
219 | dmaor_write_reg(dmaor); | |
1da177e4 | 220 | |
0d831770 PM |
221 | dmaor |= DMAOR_INIT; |
222 | dmaor_write_reg(dmaor); | |
1da177e4 | 223 | |
0d831770 PM |
224 | /* See if we got an error again */ |
225 | if ((dmaor_read_reg() & (DMAOR_AE | DMAOR_NMIF))) { | |
226 | printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); | |
227 | return -EINVAL; | |
228 | } | |
1da177e4 | 229 | |
0d831770 PM |
230 | return 0; |
231 | } | |
232 | ||
#if defined(CONFIG_CPU_SH4)
/*
 * DMAC address-error interrupt handler (SH-4 only).
 *
 * Resets DMAOR to clear the error condition, then masks this IRQ so a
 * persistent fault cannot storm the CPU.
 */
static irqreturn_t dma_err(int irq, void *dummy)
{
	dmaor_reset();
	disable_irq(irq);

	return IRQ_HANDLED;
}
#endif
242 | ||
/* Per-channel operations exported to the generic SH DMA API. */
static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};
250 | ||
/* Controller description handed to register_dmac() at init time. */
static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
257 | ||
258 | static int __init sh_dmac_init(void) | |
259 | { | |
260 | struct dma_info *info = &sh_dmac_info; | |
261 | int i; | |
262 | ||
263 | #ifdef CONFIG_CPU_SH4 | |
6d20819f | 264 | i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED, "DMAC Address Error", 0); |
9e3043c0 | 265 | if (unlikely(i < 0)) |
1da177e4 LT |
266 | return i; |
267 | #endif | |
268 | ||
0d831770 PM |
269 | /* |
270 | * Initialize DMAOR, and clean up any error flags that may have | |
271 | * been set. | |
272 | */ | |
273 | i = dmaor_reset(); | |
9e3043c0 | 274 | if (unlikely(i != 0)) |
0d831770 | 275 | return i; |
1da177e4 LT |
276 | |
277 | return register_dmac(info); | |
278 | } | |
279 | ||
280 | static void __exit sh_dmac_exit(void) | |
281 | { | |
282 | #ifdef CONFIG_CPU_SH4 | |
283 | free_irq(DMAE_IRQ, 0); | |
284 | #endif | |
0d831770 | 285 | unregister_dmac(&sh_dmac_info); |
1da177e4 LT |
286 | } |
287 | ||
/*
 * Initialize early (subsys_initcall) so other subsystems can allocate
 * DMA channels during their own initialization.
 */
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");