Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/sh/drivers/dma/dma-sh.c | |
3 | * | |
4 | * SuperH On-chip DMAC Support | |
5 | * | |
6 | * Copyright (C) 2000 Takashi YOSHII | |
7 | * Copyright (C) 2003, 2004 Paul Mundt | |
0d831770 | 8 | * Copyright (C) 2005 Andriy Skulysh |
1da177e4 LT |
9 | * |
10 | * This file is subject to the terms and conditions of the GNU General Public | |
11 | * License. See the file "COPYING" in the main directory of this archive | |
12 | * for more details. | |
13 | */ | |
1da177e4 | 14 | #include <linux/init.h> |
1da177e4 LT |
15 | #include <linux/interrupt.h> |
16 | #include <linux/module.h> | |
0d831770 | 17 | #include <asm/dreamcast/dma.h> |
1da177e4 LT |
18 | #include <asm/dma.h> |
19 | #include <asm/io.h> | |
20 | #include "dma-sh.h" | |
21 | ||
/*
 * Per-channel transfer-end (DMTE) interrupt numbers, indexed by DMA
 * channel. Channels 4-7 exist only on parts with an 8-channel DMAC
 * (SH7751R, SH7760, SH7780); elsewhere the table has four entries
 * and get_dmte_irq() bounds-checks against its size.
 */
static int dmte_irq_map[] = {
	DMTE0_IRQ,
	DMTE1_IRQ,
	DMTE2_IRQ,
	DMTE3_IRQ,
#if defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
    defined(CONFIG_CPU_SUBTYPE_SH7760) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780)
	DMTE4_IRQ,
	DMTE5_IRQ,
	DMTE6_IRQ,
	DMTE7_IRQ,
#endif
};
1da177e4 | 36 | |
bd71ab88 JL |
37 | static inline unsigned int get_dmte_irq(unsigned int chan) |
38 | { | |
39 | unsigned int irq = 0; | |
9f8a5e3a ML |
40 | if (chan < ARRAY_SIZE(dmte_irq_map)) |
41 | irq = dmte_irq_map[chan]; | |
1da177e4 LT |
42 | return irq; |
43 | } | |
44 | ||
45 | /* | |
46 | * We determine the correct shift size based off of the CHCR transmit size | |
47 | * for the given channel. Since we know that it will take: | |
48 | * | |
49 | * info->count >> ts_shift[transmit_size] | |
50 | * | |
51 | * iterations to complete the transfer. | |
52 | */ | |
53 | static inline unsigned int calc_xmit_shift(struct dma_channel *chan) | |
54 | { | |
55 | u32 chcr = ctrl_inl(CHCR[chan->chan]); | |
56 | ||
0d831770 | 57 | return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT]; |
1da177e4 LT |
58 | } |
59 | ||
60 | /* | |
61 | * The transfer end interrupt must read the chcr register to end the | |
62 | * hardware interrupt active condition. | |
63 | * Besides that it needs to waken any waiting process, which should handle | |
64 | * setting up the next transfer. | |
65 | */ | |
35f3c518 | 66 | static irqreturn_t dma_tei(int irq, void *dev_id) |
1da177e4 | 67 | { |
35f3c518 | 68 | struct dma_channel *chan = dev_id; |
1da177e4 LT |
69 | u32 chcr; |
70 | ||
71 | chcr = ctrl_inl(CHCR[chan->chan]); | |
72 | ||
73 | if (!(chcr & CHCR_TE)) | |
74 | return IRQ_NONE; | |
75 | ||
76 | chcr &= ~(CHCR_IE | CHCR_DE); | |
77 | ctrl_outl(chcr, CHCR[chan->chan]); | |
78 | ||
79 | wake_up(&chan->wait_queue); | |
80 | ||
81 | return IRQ_HANDLED; | |
82 | } | |
83 | ||
84 | static int sh_dmac_request_dma(struct dma_channel *chan) | |
85 | { | |
9e3043c0 PM |
86 | if (unlikely(!chan->flags & DMA_TEI_CAPABLE)) |
87 | return 0; | |
88 | ||
1da177e4 | 89 | return request_irq(get_dmte_irq(chan->chan), dma_tei, |
e803aaf6 | 90 | IRQF_DISABLED, chan->dev_id, chan); |
1da177e4 LT |
91 | } |
92 | ||
93 | static void sh_dmac_free_dma(struct dma_channel *chan) | |
94 | { | |
95 | free_irq(get_dmte_irq(chan->chan), chan); | |
96 | } | |
97 | ||
9f8a5e3a | 98 | static int |
0d831770 | 99 | sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) |
1da177e4 LT |
100 | { |
101 | if (!chcr) | |
0d831770 PM |
102 | chcr = RS_DUAL | CHCR_IE; |
103 | ||
104 | if (chcr & CHCR_IE) { | |
105 | chcr &= ~CHCR_IE; | |
106 | chan->flags |= DMA_TEI_CAPABLE; | |
107 | } else { | |
108 | chan->flags &= ~DMA_TEI_CAPABLE; | |
109 | } | |
1da177e4 LT |
110 | |
111 | ctrl_outl(chcr, CHCR[chan->chan]); | |
112 | ||
113 | chan->flags |= DMA_CONFIGURED; | |
9f8a5e3a | 114 | return 0; |
1da177e4 LT |
115 | } |
116 | ||
117 | static void sh_dmac_enable_dma(struct dma_channel *chan) | |
118 | { | |
0d831770 | 119 | int irq; |
1da177e4 LT |
120 | u32 chcr; |
121 | ||
122 | chcr = ctrl_inl(CHCR[chan->chan]); | |
0d831770 PM |
123 | chcr |= CHCR_DE; |
124 | ||
125 | if (chan->flags & DMA_TEI_CAPABLE) | |
126 | chcr |= CHCR_IE; | |
127 | ||
1da177e4 LT |
128 | ctrl_outl(chcr, CHCR[chan->chan]); |
129 | ||
0d831770 PM |
130 | if (chan->flags & DMA_TEI_CAPABLE) { |
131 | irq = get_dmte_irq(chan->chan); | |
132 | enable_irq(irq); | |
133 | } | |
1da177e4 LT |
134 | } |
135 | ||
136 | static void sh_dmac_disable_dma(struct dma_channel *chan) | |
137 | { | |
0d831770 | 138 | int irq; |
1da177e4 LT |
139 | u32 chcr; |
140 | ||
0d831770 PM |
141 | if (chan->flags & DMA_TEI_CAPABLE) { |
142 | irq = get_dmte_irq(chan->chan); | |
143 | disable_irq(irq); | |
144 | } | |
1da177e4 LT |
145 | |
146 | chcr = ctrl_inl(CHCR[chan->chan]); | |
147 | chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); | |
148 | ctrl_outl(chcr, CHCR[chan->chan]); | |
149 | } | |
150 | ||
151 | static int sh_dmac_xfer_dma(struct dma_channel *chan) | |
152 | { | |
153 | /* | |
154 | * If we haven't pre-configured the channel with special flags, use | |
155 | * the defaults. | |
156 | */ | |
0d831770 | 157 | if (unlikely(!(chan->flags & DMA_CONFIGURED))) |
1da177e4 LT |
158 | sh_dmac_configure_channel(chan, 0); |
159 | ||
160 | sh_dmac_disable_dma(chan); | |
161 | ||
162 | /* | |
163 | * Single-address mode usage note! | |
164 | * | |
165 | * It's important that we don't accidentally write any value to SAR/DAR | |
166 | * (this includes 0) that hasn't been directly specified by the user if | |
167 | * we're in single-address mode. | |
168 | * | |
169 | * In this case, only one address can be defined, anything else will | |
170 | * result in a DMA address error interrupt (at least on the SH-4), | |
171 | * which will subsequently halt the transfer. | |
172 | * | |
173 | * Channel 2 on the Dreamcast is a special case, as this is used for | |
174 | * cascading to the PVR2 DMAC. In this case, we still need to write | |
175 | * SAR and DAR, regardless of value, in order for cascading to work. | |
176 | */ | |
0d831770 PM |
177 | if (chan->sar || (mach_is_dreamcast() && |
178 | chan->chan == PVR2_CASCADE_CHAN)) | |
1da177e4 | 179 | ctrl_outl(chan->sar, SAR[chan->chan]); |
0d831770 PM |
180 | if (chan->dar || (mach_is_dreamcast() && |
181 | chan->chan == PVR2_CASCADE_CHAN)) | |
1da177e4 LT |
182 | ctrl_outl(chan->dar, DAR[chan->chan]); |
183 | ||
184 | ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]); | |
185 | ||
186 | sh_dmac_enable_dma(chan); | |
187 | ||
188 | return 0; | |
189 | } | |
190 | ||
191 | static int sh_dmac_get_dma_residue(struct dma_channel *chan) | |
192 | { | |
193 | if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE)) | |
194 | return 0; | |
195 | ||
196 | return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan); | |
197 | } | |
198 | ||
/*
 * DMAOR access helpers: the register is 16 bits wide on SH7780 and
 * 32 bits on the other supported parts, so the access width is picked
 * at compile time.
 */
#ifdef CONFIG_CPU_SUBTYPE_SH7780
#define dmaor_read_reg()	ctrl_inw(DMAOR)
#define dmaor_write_reg(data)	ctrl_outw(data, DMAOR)
#else
#define dmaor_read_reg()	ctrl_inl(DMAOR)
#define dmaor_write_reg(data)	ctrl_outl(data, DMAOR)
#endif
206 | ||
207 | static inline int dmaor_reset(void) | |
1da177e4 | 208 | { |
0d831770 PM |
209 | unsigned long dmaor = dmaor_read_reg(); |
210 | ||
211 | /* Try to clear the error flags first, incase they are set */ | |
212 | dmaor &= ~(DMAOR_NMIF | DMAOR_AE); | |
213 | dmaor_write_reg(dmaor); | |
1da177e4 | 214 | |
0d831770 PM |
215 | dmaor |= DMAOR_INIT; |
216 | dmaor_write_reg(dmaor); | |
1da177e4 | 217 | |
0d831770 PM |
218 | /* See if we got an error again */ |
219 | if ((dmaor_read_reg() & (DMAOR_AE | DMAOR_NMIF))) { | |
220 | printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); | |
221 | return -EINVAL; | |
222 | } | |
1da177e4 | 223 | |
0d831770 PM |
224 | return 0; |
225 | } | |
226 | ||
#if defined(CONFIG_CPU_SH4)
/*
 * DMA address-error interrupt handler: reset the controller to clear
 * the error state, then mask this IRQ so a stuck error condition
 * can't storm the CPU.
 */
static irqreturn_t dma_err(int irq, void *dummy)
{
	dmaor_reset();
	disable_irq(irq);

	return IRQ_HANDLED;
}
#endif
236 | ||
/* Operations exported to the generic SH DMA layer via register_dmac(). */
static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};
244 | ||
/* Controller descriptor registered with the generic SH DMA layer. */
static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
251 | ||
252 | static int __init sh_dmac_init(void) | |
253 | { | |
254 | struct dma_info *info = &sh_dmac_info; | |
255 | int i; | |
256 | ||
257 | #ifdef CONFIG_CPU_SH4 | |
6d20819f | 258 | i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED, "DMAC Address Error", 0); |
9e3043c0 | 259 | if (unlikely(i < 0)) |
1da177e4 LT |
260 | return i; |
261 | #endif | |
262 | ||
0d831770 PM |
263 | /* |
264 | * Initialize DMAOR, and clean up any error flags that may have | |
265 | * been set. | |
266 | */ | |
267 | i = dmaor_reset(); | |
9e3043c0 | 268 | if (unlikely(i != 0)) |
0d831770 | 269 | return i; |
1da177e4 LT |
270 | |
271 | return register_dmac(info); | |
272 | } | |
273 | ||
274 | static void __exit sh_dmac_exit(void) | |
275 | { | |
276 | #ifdef CONFIG_CPU_SH4 | |
277 | free_irq(DMAE_IRQ, 0); | |
278 | #endif | |
0d831770 | 279 | unregister_dmac(&sh_dmac_info); |
1da177e4 LT |
280 | } |
281 | ||
/* Bring the DMAC up early so subsystem drivers can rely on it. */
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");