/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
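
/*
 * Illustrative sketch, not part of the original file: a typical
 * kmap()/kunmap() pairing from a context that may sleep. The function
 * name and the zero-fill operation are hypothetical, and memset() is
 * assumed to be available via <linux/string.h>.
 */
static inline void example_zero_fill_page(struct page *page)
{
        void *addr = kmap(page);        /* may sleep if page is in highmem */

        memset(addr, 0, PAGE_SIZE);     /* mapping stays valid until kunmap() */
        kunmap(page);
}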

void *__kmap_atomic(struct page *page)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;
        int type;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue with non-VIVT caches, so
         * force dedicated kmap usage in that case for better debugging.
         */
        if (!cache_is_vivt())
                kmap = NULL;
        else
#endif
                kmap = kmap_high_get(page);
        if (kmap)
                return kmap;

        type = kmap_atomic_idx_push();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
         * in place, so this TLB flush ensures the TLB is updated with the
         * new mapping.
         */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int idx, type;

        if (kvaddr >= (void *)FIXADDR_START) {
                type = kmap_atomic_idx_pop();
                idx = type + KM_TYPE_NR * smp_processor_id();

                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
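
/*
 * Illustrative sketch, not part of the original file: the atomic pair
 * as most callers see it, via the generic kmap_atomic()/kunmap_atomic()
 * wrappers from <linux/highmem.h> which land in __kmap_atomic() and
 * __kunmap_atomic() above. Page faults are disabled between the two
 * calls, so only short, non-sleeping work may happen in between, and
 * mappings must be released in reverse (stack) order. The function and
 * its checksum logic are hypothetical.
 */
static inline u32 example_sum_page(struct page *page)
{
        u32 *p, sum = 0;
        unsigned int i;

        p = kmap_atomic(page);          /* never sleeps, unlike kmap() */
        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                sum += p[i];
        kunmap_atomic(p);               /* pops the fixmap slot pushed above */
        return sum;
}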

void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
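
/*
 * Illustrative sketch, not part of the original file: kmap_atomic_pfn()
 * maps a bare page frame number, which matters when no struct page
 * exists for the frame. The helper below is hypothetical; offset is
 * assumed to be word-aligned and to stay within the page.
 */
static inline u32 example_peek_pfn(unsigned long pfn, unsigned int offset)
{
        void *vaddr = kmap_atomic_pfn(pfn);
        u32 val = *(u32 *)(vaddr + offset);

        __kunmap_atomic(vaddr);         /* same teardown path as above */
        return val;
}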

struct page *kmap_atomic_to_page(const void *ptr)
{
        unsigned long vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However, unmapped pages may still be cached with a VIPT cache, and
 * it is unfortunately not possible to perform cache maintenance on
 * them using physical addresses. We therefore have no choice but to
 * set up a temporary virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context. As we
 * don't want to keep interrupts disabled the whole time such maintenance
 * is taking place, we allow for some reentrancy by preserving and
 * restoring the previous fixmap entry before the interrupted context
 * is resumed. If the reentrancy depth is 0 then there is no need to
 * restore the previous fixmap, and leaving the current one in place
 * allows it to be reused the next time without a TLB flush (common
 * with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
        unsigned int idx, cpu;
        int *depth;
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        if (!in_interrupt())
                preempt_disable();

        cpu = smp_processor_id();
        depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
                *saved_pte = pte;
        } else {
                *saved_pte = *ptep;
                set_pte_ext(ptep, pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        return (void *)vaddr;
}

void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        BUG_ON(pte_val(*ptep) != pte_val(pte));
        BUG_ON(*depth <= 0);

        raw_local_irq_save(flags);
        (*depth)--;
        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
                set_pte_ext(ptep, saved_pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        if (!in_interrupt())
                preempt_enable();
}
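
/*
 * Illustrative sketch, not part of the original file: how a cache
 * maintenance routine might use the pair above to reach a highmem page
 * through a temporary L1 fixmap entry on a VIPT cache. The caller
 * below is a simplified, hypothetical example.
 */
static inline void example_flush_highmem_page(struct page *page)
{
        pte_t saved_pte;
        void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

        __cpuc_flush_dcache_area(vaddr, PAGE_SIZE);     /* virtual-address flush */
        kunmap_high_l1_vipt(page, saved_pte);           /* restores previous entry */
}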

#endif /* CONFIG_CPU_CACHE_VIPT */