[PARISC] Add CONFIG_HPPA_IOREMAP to conditionally enable ioremap
/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Fill in a contiguous run of PTEs within a single PMD, mapping them
 * onto phys_addr with the requested protection flags.  The PTEs must
 * currently be unused.
 */
static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end, pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
				   _PAGE_ACCESSED | flags);

	address &= ~PMD_MASK;

	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	BUG_ON(address >= end);

	pfn = phys_addr >> PAGE_SHIFT;
	do {
		BUG_ON(!pte_none(*pte));

		set_pte(pte, pfn_pte(pfn, pgprot));

		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

/*
 * Walk one PGDIR-sized region a PMD entry at a time, allocating PTE
 * pages as needed and populating them via remap_area_pte().
 */
static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;

	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	BUG_ON(address >= end);

	phys_addr -= address;
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;

		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);

		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}

#ifdef CONFIG_HPPA_IOREMAP
/*
 * Build kernel page table entries mapping the virtual range
 * [address, address + size) onto phys_addr.
 */
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
		 unsigned long size, unsigned long flags)
{
	pgd_t *dir;
	int error = 0;
	unsigned long end = address + size;

	BUG_ON(address >= end);

	phys_addr -= address;
	dir = pgd_offset_k(address);

	flush_cache_all();

	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;

		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;

		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;

		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();

	return error;
}
#endif /* CONFIG_HPPA_IOREMAP */

#ifdef CONFIG_DEBUG_IOREMAP
static unsigned long last = 0;

void gsc_bad_addr(unsigned long addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk("gsc_foo() called with bad address 0x%lx\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(gsc_bad_addr);

void __raw_bad_addr(const volatile void __iomem *addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk("__raw_foo() called with bad address 0x%p\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(__raw_bad_addr);
#endif

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !defined(CONFIG_HPPA_IOREMAP)
	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
	    || (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= 0xfc000000;
	}

#ifdef CONFIG_DEBUG_IOREMAP
	return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
#else
	return (void __iomem *)phys_addr;
#endif

#else
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++) {
			if (!PageReserved(page))
				return NULL;
		}
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
#endif
}

void iounmap(void __iomem *addr)
{
#ifdef CONFIG_HPPA_IOREMAP
	if (addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
#else
	return;
#endif
}
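
For context, a minimal sketch of how a driver typically reaches this code through the standard ioremap()/iounmap() entry points (which on parisc are thin wrappers ending up in __ioremap() above). The device base address, mapping size, and register offset below are hypothetical, chosen only for illustration:

/* Illustrative only -- not part of ioremap.c. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/io.h>

#define EXAMPLE_PHYS_BASE	0xf4001000UL	/* made-up device address */
#define EXAMPLE_SIZE		0x1000UL	/* one 4 KiB register page */
#define EXAMPLE_STATUS_REG	0x10		/* made-up register offset */

static int example_probe(void)
{
	void __iomem *regs;
	u32 status;

	/* Map the device registers; this lands in __ioremap() above. */
	regs = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE);
	if (!regs)
		return -ENOMEM;

	status = readl(regs + EXAMPLE_STATUS_REG);
	printk(KERN_INFO "example: status %08x\n", status);

	/* Tear the mapping down again via iounmap() above. */
	iounmap(regs);
	return 0;
}

Note that when CONFIG_HPPA_IOREMAP is not set, __ioremap() simply hands back the (possibly EISA-adjusted) physical address and iounmap() is a no-op, so the example works either way.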