Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/sh/drivers/dma/dma-sh.c | |
3 | * | |
4 | * SuperH On-chip DMAC Support | |
5 | * | |
6 | * Copyright (C) 2000 Takashi YOSHII | |
7 | * Copyright (C) 2003, 2004 Paul Mundt | |
0d831770 | 8 | * Copyright (C) 2005 Andriy Skulysh |
1da177e4 LT |
9 | * |
10 | * This file is subject to the terms and conditions of the GNU General Public | |
11 | * License. See the file "COPYING" in the main directory of this archive | |
12 | * for more details. | |
13 | */ | |
14 | ||
1da177e4 LT |
15 | #include <linux/init.h> |
16 | #include <linux/irq.h> | |
17 | #include <linux/interrupt.h> | |
18 | #include <linux/module.h> | |
0d831770 | 19 | #include <asm/dreamcast/dma.h> |
1da177e4 LT |
20 | #include <asm/signal.h> |
21 | #include <asm/irq.h> | |
22 | #include <asm/dma.h> | |
23 | #include <asm/io.h> | |
24 | #include "dma-sh.h" | |
25 | ||
1da177e4 LT |
26 | static inline unsigned int get_dmte_irq(unsigned int chan) |
27 | { | |
0d831770 | 28 | unsigned int irq = 0; |
1da177e4 LT |
29 | |
30 | /* | |
31 | * Normally we could just do DMTE0_IRQ + chan outright, though in the | |
32 | * case of the 7751R, the DMTE IRQs for channels > 4 start right above | |
33 | * the SCIF | |
34 | */ | |
1da177e4 LT |
35 | if (chan < 4) { |
36 | irq = DMTE0_IRQ + chan; | |
37 | } else { | |
0d831770 | 38 | #ifdef DMTE4_IRQ |
1da177e4 | 39 | irq = DMTE4_IRQ + chan - 4; |
0d831770 | 40 | #endif |
1da177e4 LT |
41 | } |
42 | ||
43 | return irq; | |
44 | } | |
45 | ||
46 | /* | |
47 | * We determine the correct shift size based off of the CHCR transmit size | |
48 | * for the given channel. Since we know that it will take: | |
49 | * | |
50 | * info->count >> ts_shift[transmit_size] | |
51 | * | |
52 | * iterations to complete the transfer. | |
53 | */ | |
54 | static inline unsigned int calc_xmit_shift(struct dma_channel *chan) | |
55 | { | |
56 | u32 chcr = ctrl_inl(CHCR[chan->chan]); | |
57 | ||
0d831770 | 58 | return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT]; |
1da177e4 LT |
59 | } |
60 | ||
61 | /* | |
62 | * The transfer end interrupt must read the chcr register to end the | |
63 | * hardware interrupt active condition. | |
64 | * Besides that it needs to waken any waiting process, which should handle | |
65 | * setting up the next transfer. | |
66 | */ | |
67 | static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs) | |
68 | { | |
69 | struct dma_channel *chan = (struct dma_channel *)dev_id; | |
70 | u32 chcr; | |
71 | ||
72 | chcr = ctrl_inl(CHCR[chan->chan]); | |
73 | ||
74 | if (!(chcr & CHCR_TE)) | |
75 | return IRQ_NONE; | |
76 | ||
77 | chcr &= ~(CHCR_IE | CHCR_DE); | |
78 | ctrl_outl(chcr, CHCR[chan->chan]); | |
79 | ||
80 | wake_up(&chan->wait_queue); | |
81 | ||
82 | return IRQ_HANDLED; | |
83 | } | |
84 | ||
/*
 * Claim the transfer end IRQ for a channel, with a per-channel name.
 *
 * NOTE(review): 'name' is a stack buffer, but request_irq() retains
 * the name pointer for the lifetime of the handler (it is shown in
 * /proc/interrupts), so the string dangles once this function
 * returns.  The name should live in persistent, per-channel storage —
 * TODO confirm what struct dma_channel offers for this.
 */
static int sh_dmac_request_dma(struct dma_channel *chan)
{
	char name[32];

	snprintf(name, sizeof(name), "DMAC Transfer End (Channel %d)",
		 chan->chan);

	return request_irq(get_dmte_irq(chan->chan), dma_tei,
			   SA_INTERRUPT, name, chan);
}
95 | ||
96 | static void sh_dmac_free_dma(struct dma_channel *chan) | |
97 | { | |
98 | free_irq(get_dmte_irq(chan->chan), chan); | |
99 | } | |
100 | ||
0d831770 PM |
101 | static void |
102 | sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) | |
1da177e4 LT |
103 | { |
104 | if (!chcr) | |
0d831770 PM |
105 | chcr = RS_DUAL | CHCR_IE; |
106 | ||
107 | if (chcr & CHCR_IE) { | |
108 | chcr &= ~CHCR_IE; | |
109 | chan->flags |= DMA_TEI_CAPABLE; | |
110 | } else { | |
111 | chan->flags &= ~DMA_TEI_CAPABLE; | |
112 | } | |
1da177e4 LT |
113 | |
114 | ctrl_outl(chcr, CHCR[chan->chan]); | |
115 | ||
116 | chan->flags |= DMA_CONFIGURED; | |
117 | } | |
118 | ||
119 | static void sh_dmac_enable_dma(struct dma_channel *chan) | |
120 | { | |
0d831770 | 121 | int irq; |
1da177e4 LT |
122 | u32 chcr; |
123 | ||
124 | chcr = ctrl_inl(CHCR[chan->chan]); | |
0d831770 PM |
125 | chcr |= CHCR_DE; |
126 | ||
127 | if (chan->flags & DMA_TEI_CAPABLE) | |
128 | chcr |= CHCR_IE; | |
129 | ||
1da177e4 LT |
130 | ctrl_outl(chcr, CHCR[chan->chan]); |
131 | ||
0d831770 PM |
132 | if (chan->flags & DMA_TEI_CAPABLE) { |
133 | irq = get_dmte_irq(chan->chan); | |
134 | enable_irq(irq); | |
135 | } | |
1da177e4 LT |
136 | } |
137 | ||
138 | static void sh_dmac_disable_dma(struct dma_channel *chan) | |
139 | { | |
0d831770 | 140 | int irq; |
1da177e4 LT |
141 | u32 chcr; |
142 | ||
0d831770 PM |
143 | if (chan->flags & DMA_TEI_CAPABLE) { |
144 | irq = get_dmte_irq(chan->chan); | |
145 | disable_irq(irq); | |
146 | } | |
1da177e4 LT |
147 | |
148 | chcr = ctrl_inl(CHCR[chan->chan]); | |
149 | chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); | |
150 | ctrl_outl(chcr, CHCR[chan->chan]); | |
151 | } | |
152 | ||
/*
 * Kick off a transfer on a channel: stop it, program SAR/DAR/DMATCR,
 * and restart it.  An unconfigured channel is first given the default
 * configuration.  Always returns 0.
 */
static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	/* Quiesce the channel before reprogramming its registers */
	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to SAR/DAR
	 * (this includes 0) that hasn't been directly specified by the user if
	 * we're in single-address mode.
	 *
	 * In this case, only one address can be defined, anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as this is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->sar, SAR[chan->chan]);
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->dar, DAR[chan->chan]);

	/* DMATCR counts in CHCR transmit-size units, not bytes */
	ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]);

	sh_dmac_enable_dma(chan);

	return 0;
}
192 | ||
193 | static int sh_dmac_get_dma_residue(struct dma_channel *chan) | |
194 | { | |
195 | if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE)) | |
196 | return 0; | |
197 | ||
198 | return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan); | |
199 | } | |
200 | ||
0d831770 PM |
/*
 * DMAOR access helpers: the SH7780 is accessed with 16-bit reads and
 * writes, everything else here uses 32-bit accesses.
 */
#ifdef CONFIG_CPU_SUBTYPE_SH7780
#define dmaor_read_reg() ctrl_inw(DMAOR)
#define dmaor_write_reg(data) ctrl_outw(data, DMAOR)
#else
#define dmaor_read_reg() ctrl_inl(DMAOR)
#define dmaor_write_reg(data) ctrl_outl(data, DMAOR)
#endif
208 | ||
209 | static inline int dmaor_reset(void) | |
1da177e4 | 210 | { |
0d831770 PM |
211 | unsigned long dmaor = dmaor_read_reg(); |
212 | ||
213 | /* Try to clear the error flags first, incase they are set */ | |
214 | dmaor &= ~(DMAOR_NMIF | DMAOR_AE); | |
215 | dmaor_write_reg(dmaor); | |
1da177e4 | 216 | |
0d831770 PM |
217 | dmaor |= DMAOR_INIT; |
218 | dmaor_write_reg(dmaor); | |
1da177e4 | 219 | |
0d831770 PM |
220 | /* See if we got an error again */ |
221 | if ((dmaor_read_reg() & (DMAOR_AE | DMAOR_NMIF))) { | |
222 | printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); | |
223 | return -EINVAL; | |
224 | } | |
1da177e4 | 225 | |
0d831770 PM |
226 | return 0; |
227 | } | |
228 | ||
#if defined(CONFIG_CPU_SH4)
/*
 * DMAC address error handler: reset DMAOR to clear the error flags,
 * then mask further address error interrupts on this line.
 */
static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
{
	dmaor_reset();
	disable_irq(irq);

	return IRQ_HANDLED;
}
#endif
238 | ||
/* On-chip DMAC operations, exported through the generic SH DMA layer. */
static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};
246 | ||
/* Controller description registered with the SH DMA core; channel
 * count comes from the kernel configuration. */
static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
253 | ||
254 | static int __init sh_dmac_init(void) | |
255 | { | |
256 | struct dma_info *info = &sh_dmac_info; | |
257 | int i; | |
258 | ||
259 | #ifdef CONFIG_CPU_SH4 | |
260 | make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY); | |
261 | i = request_irq(DMAE_IRQ, dma_err, SA_INTERRUPT, "DMAC Address Error", 0); | |
262 | if (i < 0) | |
263 | return i; | |
264 | #endif | |
265 | ||
266 | for (i = 0; i < info->nr_channels; i++) { | |
267 | int irq = get_dmte_irq(i); | |
268 | ||
269 | make_ipr_irq(irq, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY); | |
270 | } | |
271 | ||
0d831770 PM |
272 | /* |
273 | * Initialize DMAOR, and clean up any error flags that may have | |
274 | * been set. | |
275 | */ | |
276 | i = dmaor_reset(); | |
277 | if (i < 0) | |
278 | return i; | |
1da177e4 LT |
279 | |
280 | return register_dmac(info); | |
281 | } | |
282 | ||
283 | static void __exit sh_dmac_exit(void) | |
284 | { | |
285 | #ifdef CONFIG_CPU_SH4 | |
286 | free_irq(DMAE_IRQ, 0); | |
287 | #endif | |
0d831770 | 288 | unregister_dmac(&sh_dmac_info); |
1da177e4 LT |
289 | } |
290 | ||
/* Initialized at subsys_initcall time rather than plain module init. */
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");