Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __ASM_SH_IO_H |
2 | #define __ASM_SH_IO_H | |
3 | ||
4 | /* | |
5 | * Convention: | |
6 | * read{b,w,l}/write{b,w,l} are for PCI, | |
7 | * while in{b,w,l}/out{b,w,l} are for ISA | |
8 | * These may (will) be platform specific function. | |
9 | * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p | |
10 | * and 'string' versions: ins{b,w,l}/outs{b,w,l} | |
11 | * For read{b,w,l} and write{b,w,l} there are also __raw versions, which | |
12 | * do not have a memory barrier after them. | |
13 | * | |
b66c1a39 | 14 | * In addition, we have |
1da177e4 LT |
15 | * ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O. |
16 | * which are processor specific. | |
17 | */ | |
18 | ||
19 | /* | |
20 | * We follow the Alpha convention here: | |
21 | * __inb expands to an inline function call (which calls via the mv) | |
22 | * _inb is a real function call (note ___raw fns are _ version of __raw) | |
23 | * inb by default expands to _inb, but the machine specific code may | |
24 | * define it to __inb if it chooses. | |
25 | */ | |
1da177e4 LT |
26 | #include <asm/cache.h> |
27 | #include <asm/system.h> | |
28 | #include <asm/addrspace.h> | |
29 | #include <asm/machvec.h> | |
b66c1a39 PM |
30 | #include <asm/pgtable.h> |
31 | #include <asm-generic/iomap.h> | |
32 | ||
33 | #ifdef __KERNEL__ | |
1da177e4 LT |
34 | |
35 | /* | |
36 | * Depending on which platform we are running on, we need different | |
37 | * I/O functions. | |
38 | */ | |
b66c1a39 PM |
39 | #define __IO_PREFIX generic |
40 | #include <asm/io_generic.h> | |
41 | ||
42 | #define maybebadio(port) \ | |
43 | printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ | |
44 | __FUNCTION__, __LINE__, (port), (u32)__builtin_return_address(0)) | |
1da177e4 | 45 | |
1da177e4 LT |
46 | /* |
47 | * Since boards are able to define their own set of I/O routines through | |
48 | * their respective machine vector, we always wrap through the mv. | |
49 | * | |
50 | * Also, in the event that a board hasn't provided its own definition for | |
51 | * a given routine, it will be wrapped to generic code at run-time. | |
52 | */ | |
53 | ||
b66c1a39 PM |
54 | #define __inb(p) sh_mv.mv_inb((p)) |
55 | #define __inw(p) sh_mv.mv_inw((p)) | |
56 | #define __inl(p) sh_mv.mv_inl((p)) | |
57 | #define __outb(x,p) sh_mv.mv_outb((x),(p)) | |
58 | #define __outw(x,p) sh_mv.mv_outw((x),(p)) | |
59 | #define __outl(x,p) sh_mv.mv_outl((x),(p)) | |
60 | ||
61 | #define __inb_p(p) sh_mv.mv_inb_p((p)) | |
62 | #define __inw_p(p) sh_mv.mv_inw_p((p)) | |
63 | #define __inl_p(p) sh_mv.mv_inl_p((p)) | |
64 | #define __outb_p(x,p) sh_mv.mv_outb_p((x),(p)) | |
65 | #define __outw_p(x,p) sh_mv.mv_outw_p((x),(p)) | |
66 | #define __outl_p(x,p) sh_mv.mv_outl_p((x),(p)) | |
67 | ||
68 | #define __insb(p,b,c) sh_mv.mv_insb((p), (b), (c)) | |
69 | #define __insw(p,b,c) sh_mv.mv_insw((p), (b), (c)) | |
70 | #define __insl(p,b,c) sh_mv.mv_insl((p), (b), (c)) | |
71 | #define __outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c)) | |
72 | #define __outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c)) | |
73 | #define __outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c)) | |
74 | ||
75 | #define __readb(a) sh_mv.mv_readb((a)) | |
76 | #define __readw(a) sh_mv.mv_readw((a)) | |
77 | #define __readl(a) sh_mv.mv_readl((a)) | |
78 | #define __writeb(v,a) sh_mv.mv_writeb((v),(a)) | |
79 | #define __writew(v,a) sh_mv.mv_writew((v),(a)) | |
80 | #define __writel(v,a) sh_mv.mv_writel((v),(a)) | |
81 | ||
82 | #define inb __inb | |
83 | #define inw __inw | |
84 | #define inl __inl | |
85 | #define outb __outb | |
86 | #define outw __outw | |
87 | #define outl __outl | |
88 | ||
89 | #define inb_p __inb_p | |
90 | #define inw_p __inw_p | |
91 | #define inl_p __inl_p | |
92 | #define outb_p __outb_p | |
93 | #define outw_p __outw_p | |
94 | #define outl_p __outl_p | |
95 | ||
96 | #define insb __insb | |
97 | #define insw __insw | |
98 | #define insl __insl | |
99 | #define outsb __outsb | |
100 | #define outsw __outsw | |
101 | #define outsl __outsl | |
102 | ||
103 | #define __raw_readb(a) __readb((void __iomem *)(a)) | |
104 | #define __raw_readw(a) __readw((void __iomem *)(a)) | |
105 | #define __raw_readl(a) __readl((void __iomem *)(a)) | |
106 | #define __raw_writeb(v, a) __writeb(v, (void __iomem *)(a)) | |
107 | #define __raw_writew(v, a) __writew(v, (void __iomem *)(a)) | |
108 | #define __raw_writel(v, a) __writel(v, (void __iomem *)(a)) | |
1da177e4 | 109 | |
05ae9158 PM |
110 | void __raw_writesl(unsigned long addr, const void *data, int longlen); |
111 | void __raw_readsl(unsigned long addr, void *data, int longlen); | |
112 | ||
1da177e4 LT |
113 | /* |
114 | * The platform header files may define some of these macros to use | |
115 | * the inlined versions where appropriate. These macros may also be | |
116 | * redefined by userlevel programs. | |
117 | */ | |
b66c1a39 | 118 | #ifdef __readb |
66c5227e | 119 | # define readb(a) ({ unsigned int r_ = __raw_readb(a); mb(); r_; }) |
1da177e4 LT |
120 | #endif |
121 | #ifdef __raw_readw | |
66c5227e | 122 | # define readw(a) ({ unsigned int r_ = __raw_readw(a); mb(); r_; }) |
1da177e4 LT |
123 | #endif |
124 | #ifdef __raw_readl | |
66c5227e | 125 | # define readl(a) ({ unsigned int r_ = __raw_readl(a); mb(); r_; }) |
1da177e4 LT |
126 | #endif |
127 | ||
128 | #ifdef __raw_writeb | |
b66c1a39 | 129 | # define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); }) |
1da177e4 LT |
130 | #endif |
131 | #ifdef __raw_writew | |
b66c1a39 | 132 | # define writew(v,a) ({ __raw_writew((v),(a)); mb(); }) |
1da177e4 LT |
133 | #endif |
134 | #ifdef __raw_writel | |
b66c1a39 | 135 | # define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) |
1da177e4 LT |
136 | #endif |
137 | ||
05ae9158 PM |
138 | #define writesl __raw_writesl |
139 | #define readsl __raw_readsl | |
140 | ||
1da177e4 LT |
141 | #define readb_relaxed(a) readb(a) |
142 | #define readw_relaxed(a) readw(a) | |
143 | #define readl_relaxed(a) readl(a) | |
144 | ||
b66c1a39 PM |
145 | /* Simple MMIO */ |
146 | #define ioread8(a) readb(a) | |
147 | #define ioread16(a) readw(a) | |
148 | #define ioread16be(a) be16_to_cpu(__raw_readw((a))) | |
149 | #define ioread32(a) readl(a) | |
150 | #define ioread32be(a) be32_to_cpu(__raw_readl((a))) | |
1da177e4 | 151 | |
b66c1a39 PM |
152 | #define iowrite8(v,a) writeb((v),(a)) |
153 | #define iowrite16(v,a) writew((v),(a)) | |
154 | #define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a)) | |
155 | #define iowrite32(v,a) writel((v),(a)) | |
156 | #define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a)) | |
157 | ||
158 | #define ioread8_rep(a,d,c) insb((a),(d),(c)) | |
159 | #define ioread16_rep(a,d,c) insw((a),(d),(c)) | |
160 | #define ioread32_rep(a,d,c) insl((a),(d),(c)) | |
161 | ||
162 | #define iowrite8_rep(a,s,c) outsb((a),(s),(c)) | |
163 | #define iowrite16_rep(a,s,c) outsw((a),(s),(c)) | |
164 | #define iowrite32_rep(a,s,c) outsl((a),(s),(c)) | |
165 | ||
166 | #define mmiowb() wmb() /* synco on SH-4A, otherwise a nop */ | |
1da177e4 LT |
167 | |
168 | /* | |
169 | * This function provides a method for the generic case where a board-specific | |
b66c1a39 | 170 | * ioport_map simply needs to return the port + some arbitrary port base. |
1da177e4 LT |
171 | * |
172 | * We use this at board setup time to implicitly set the port base, and | |
b66c1a39 | 173 | * as a result, we can use the generic ioport_map. |
1da177e4 LT |
174 | */ |
175 | static inline void __set_io_port_base(unsigned long pbase) | |
176 | { | |
177 | extern unsigned long generic_io_base; | |
178 | ||
179 | generic_io_base = pbase; | |
180 | } | |
181 | ||
1da177e4 | 182 | /* We really want to try and get these to memcpy etc */ |
b66c1a39 PM |
183 | extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long); |
184 | extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long); | |
185 | extern void memset_io(volatile void __iomem *, int, unsigned long); | |
1da177e4 LT |
186 | |
187 | /* SuperH on-chip I/O functions */ | |
b66c1a39 | 188 | static inline unsigned char ctrl_inb(unsigned long addr) |
1da177e4 LT |
189 | { |
190 | return *(volatile unsigned char*)addr; | |
191 | } | |
192 | ||
b66c1a39 | 193 | static inline unsigned short ctrl_inw(unsigned long addr) |
1da177e4 LT |
194 | { |
195 | return *(volatile unsigned short*)addr; | |
196 | } | |
197 | ||
b66c1a39 | 198 | static inline unsigned int ctrl_inl(unsigned long addr) |
1da177e4 LT |
199 | { |
200 | return *(volatile unsigned long*)addr; | |
201 | } | |
202 | ||
b66c1a39 | 203 | static inline void ctrl_outb(unsigned char b, unsigned long addr) |
1da177e4 LT |
204 | { |
205 | *(volatile unsigned char*)addr = b; | |
206 | } | |
207 | ||
b66c1a39 | 208 | static inline void ctrl_outw(unsigned short b, unsigned long addr) |
1da177e4 LT |
209 | { |
210 | *(volatile unsigned short*)addr = b; | |
211 | } | |
212 | ||
b66c1a39 | 213 | static inline void ctrl_outl(unsigned int b, unsigned long addr) |
1da177e4 LT |
214 | { |
215 | *(volatile unsigned long*)addr = b; | |
216 | } | |
217 | ||
959f85f8 PM |
218 | static inline void ctrl_delay(void) |
219 | { | |
220 | ctrl_inw(P2SEG); | |
221 | } | |
222 | ||
1da177e4 LT |
223 | #define IO_SPACE_LIMIT 0xffffffff |
224 | ||
a2d1a5fa | 225 | #ifdef CONFIG_MMU |
1da177e4 LT |
226 | /* |
227 | * Change virtual addresses to physical addresses and vv. | |
228 | * These are trivial on the 1:1 Linux/SuperH mapping | |
229 | */ | |
b66c1a39 | 230 | static inline unsigned long virt_to_phys(volatile void *address) |
1da177e4 LT |
231 | { |
232 | return PHYSADDR(address); | |
233 | } | |
234 | ||
b66c1a39 | 235 | static inline void *phys_to_virt(unsigned long address) |
1da177e4 LT |
236 | { |
237 | return (void *)P1SEGADDR(address); | |
238 | } | |
a2d1a5fa YS |
239 | #else |
240 | #define phys_to_virt(address) ((void *)(address)) | |
241 | #define virt_to_phys(address) ((unsigned long)(address)) | |
242 | #endif | |
1da177e4 | 243 | |
1da177e4 LT |
244 | /* |
245 | * readX/writeX() are used to access memory mapped devices. On some | |
246 | * architectures the memory mapped IO stuff needs to be accessed | |
247 | * differently. On the x86 architecture, we just read/write the | |
248 | * memory location directly. | |
249 | * | |
b66c1a39 PM |
250 | * On SH, we traditionally have the whole physical address space mapped |
251 | * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not | |
252 | * need to do anything but place the address in the proper segment. This | |
253 | * is true for P1 and P2 addresses, as well as some P3 ones. However, | |
254 | * most of the P3 addresses and newer cores using extended addressing | |
255 | * need to map through page tables, so the ioremap() implementation | |
256 | * becomes a bit more complicated. See arch/sh/mm/ioremap.c for | |
257 | * additional notes on this. | |
1da177e4 LT |
258 | * |
259 | * We cheat a bit and always return uncachable areas until we've fixed | |
b66c1a39 | 260 | * the drivers to handle caching properly. |
1da177e4 | 261 | */ |
b66c1a39 PM |
262 | #ifdef CONFIG_MMU |
263 | void __iomem *__ioremap(unsigned long offset, unsigned long size, | |
264 | unsigned long flags); | |
265 | void __iounmap(void __iomem *addr); | |
266 | #else | |
267 | #define __ioremap(offset, size, flags) ((void __iomem *)(offset)) | |
268 | #define __iounmap(addr) do { } while (0) | |
269 | #endif /* CONFIG_MMU */ | |
270 | ||
271 | static inline void __iomem * | |
272 | __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) | |
1da177e4 | 273 | { |
b66c1a39 PM |
274 | unsigned long last_addr = offset + size - 1; |
275 | ||
276 | /* | |
277 | * For P1 and P2 space this is trivial, as everything is already | |
278 | * mapped. Uncached access for P1 addresses are done through P2. | |
279 | * In the P3 case or for addresses outside of the 29-bit space, | |
280 | * mapping must be done by the PMB or by using page tables. | |
281 | */ | |
282 | if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { | |
283 | if (unlikely(flags & _PAGE_CACHABLE)) | |
284 | return (void __iomem *)P1SEGADDR(offset); | |
285 | ||
286 | return (void __iomem *)P2SEGADDR(offset); | |
287 | } | |
288 | ||
289 | return __ioremap(offset, size, flags); | |
1da177e4 LT |
290 | } |
291 | ||
b66c1a39 PM |
292 | #define ioremap(offset, size) \ |
293 | __ioremap_mode((offset), (size), 0) | |
294 | #define ioremap_nocache(offset, size) \ | |
295 | __ioremap_mode((offset), (size), 0) | |
296 | #define ioremap_cache(offset, size) \ | |
297 | __ioremap_mode((offset), (size), _PAGE_CACHABLE) | |
298 | #define p3_ioremap(offset, size, flags) \ | |
299 | __ioremap((offset), (size), (flags)) | |
300 | #define iounmap(addr) \ | |
301 | __iounmap((addr)) | |
302 | ||
1da177e4 LT |
303 | /* |
304 | * The caches on some architectures aren't dma-coherent and have need to | |
305 | * handle this in software. There are three types of operations that | |
306 | * can be applied to dma buffers. | |
307 | * | |
308 | * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by | |
309 | * writing the content of the caches back to memory, if necessary. | |
310 | * The function also invalidates the affected part of the caches as | |
311 | * necessary before DMA transfers from outside to memory. | |
312 | * - dma_cache_inv(start, size) invalidates the affected parts of the | |
313 | * caches. Dirty lines of the caches may be written back or simply | |
314 | * be discarded. This operation is necessary before dma operations | |
315 | * to the memory. | |
316 | * - dma_cache_wback(start, size) writes back any dirty lines but does | |
317 | * not invalidate the cache. This can be used before DMA reads from | |
318 | * memory, | |
319 | */ | |
320 | ||
321 | #define dma_cache_wback_inv(_start,_size) \ | |
322 | __flush_purge_region(_start,_size) | |
323 | #define dma_cache_inv(_start,_size) \ | |
324 | __flush_invalidate_region(_start,_size) | |
325 | #define dma_cache_wback(_start,_size) \ | |
326 | __flush_wback_region(_start,_size) | |
327 | ||
328 | /* | |
329 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | |
330 | * access | |
331 | */ | |
332 | #define xlate_dev_mem_ptr(p) __va(p) | |
333 | ||
334 | /* | |
335 | * Convert a virtual cached pointer to an uncached pointer | |
336 | */ | |
337 | #define xlate_dev_kmem_ptr(p) p | |
338 | ||
339 | #endif /* __KERNEL__ */ | |
340 | ||
341 | #endif /* __ASM_SH_IO_H */ |