arch/powerpc/kernel/io.c
/*
 * I/O string operations
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2006 IBM Corporation
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * Rewritten in C by Stephen Rothwell.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/module.h>

#include <asm/io.h>
#include <asm/firmware.h>
#include <asm/bug.h>

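/*
 * Read @count bytes from the byte-wide MMIO register at @port into @buf.
 * The leading sync orders the reads against earlier accesses, eieio keeps
 * the individual loads in order, and the trailing twi/isync sequence makes
 * sure the final load has completed before the function returns.
 */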
void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
	u8 *tbuf = buf;
	u8 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insb);

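/*
 * Write @count bytes from @buf to the byte-wide MMIO register at @port,
 * with sync barriers before and after the loop to keep the stores ordered
 * with respect to surrounding accesses.
 */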
void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
	const u8 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsb);

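/*
 * 16-bit variant of _insb(): read @count halfwords from @port into @buf
 * in native byte order (no byte swapping, hence the _ns suffix).
 */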
void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
	u16 *tbuf = buf;
	u16 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insw_ns);

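/*
 * 16-bit variant of _outsb(): write @count halfwords from @buf to @port
 * in native byte order (no byte swapping).
 */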
void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
	const u16 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsw_ns);

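/*
 * 32-bit variant of _insb(): read @count words from @port into @buf in
 * native byte order.
 */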
void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
	u32 *tbuf = buf;
	u32 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insl_ns);

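/*
 * 32-bit variant of _outsb(): write @count words from @buf to @port in
 * native byte order.
 */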
void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
	const u32 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);

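/* True when pointer v is aligned to an a-byte boundary (a must be a power of two). */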
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)

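/*
 * Fill @n bytes of MMIO space at @addr with the byte value @c.  The byte
 * is replicated into a 32-bit word so the aligned middle of the region
 * can be written a word at a time; unaligned head and tail bytes are
 * written individually.  sync barriers bracket the whole operation.
 */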
notrace void
_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
	void *p = (void __force *)addr;
	u32 lc = c;
	lc |= lc << 8;
	lc |= lc << 16;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && !IO_CHECK_ALIGN(p, 4)) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)p) = lc;
		p += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memset_io);

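/*
 * Copy @n bytes from MMIO space at @src to normal memory at @dest.
 * Bytes are copied individually until both pointers are word aligned,
 * then mostly a word at a time, with any remaining tail bytes copied
 * individually and eieio() keeping the device reads in order.
 */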
void _memcpy_fromio(void *dest, const volatile void __iomem *src,
		    unsigned long n)
{
	void *vsrc = (void __force *) src;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	while(n > 4) {
		*((u32 *)dest) = *((volatile u32 *)vsrc);
		eieio();
		vsrc += 4;
		dest += 4;
		n -= 4;
	}
	while(n) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_fromio);

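/*
 * Copy @n bytes from normal memory at @src to MMIO space at @dest, a word
 * at a time once both pointers are word aligned, with any remaining tail
 * bytes written individually.  sync barriers before and after order the
 * stores against surrounding code.
 */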
void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
	void *vdest = (void __force *) dest;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	while(n > 4) {
		*((volatile u32 *)vdest) = *((volatile u32 *)src);
		src += 4;
		vdest += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_toio);