Commit | Line | Data |
---|---|---|
d73cd428 NP |
1 | /* |
2 | * arch/arm/mm/highmem.c -- ARM highmem support | |
3 | * | |
4 | * Author: Nicolas Pitre | |
5 | * Created: september 8, 2008 | |
6 | * Copyright: Marvell Semiconductors Inc. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/module.h> | |
14 | #include <linux/highmem.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <asm/fixmap.h> | |
17 | #include <asm/cacheflush.h> | |
18 | #include <asm/tlbflush.h> | |
19 | #include "mm.h" | |
20 | ||
/*
 * Map a page into kernel virtual space.  Lowmem pages already have a
 * permanent linear mapping; highmem pages go through kmap_high(), which
 * may sleep, hence this must not be called from atomic context.
 */
void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	addr = PageHighMem(page) ? kmap_high(page) : page_address(page);
	return addr;
}
EXPORT_SYMBOL(kmap);
29 | ||
/*
 * Release a mapping created by kmap().  Only highmem pages need real
 * teardown; lowmem pages are permanently mapped.  Must not be called
 * from interrupt context (kunmap_high may sleep).
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
38 | ||
/*
 * Atomically map a page into a per-CPU fixmap slot.  Pagefaults stay
 * disabled until the matching __kunmap_atomic().  Lowmem pages return
 * their linear address; a page already mapped via kmap_high() may reuse
 * that mapping (see kmap_high_get below).
 */
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	/* Reuse an existing permanent kmap of this page if one exists. */
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	/* Claim the next atomic-kmap slot for this CPU. */
	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
d73cd428 | 85 | |
/*
 * Undo __kmap_atomic()/kmap_atomic_pfn() and re-enable pagefaults.
 * Handles three address classes: fixmap slots (atomic kmaps), pkmap
 * addresses handed out by kmap_high_get(), and lowmem addresses which
 * need no unmapping at all.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		/* An atomic fixmap slot mapped on this CPU. */
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * VIVT caches tag lines by virtual address, so flush the
		 * page's lines before the virtual mapping disappears.
		 */
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		/* Clear the PTE so the BUG_ON in __kmap_atomic can catch reuse. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
d73cd428 | 112 | |
/*
 * Like __kmap_atomic() but takes a raw page frame number instead of a
 * struct page.  Maps the frame into a per-CPU fixmap slot with
 * pagefaults disabled; undone by __kunmap_atomic().
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	/* Claim the next atomic-kmap slot for this CPU. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Debug builds zero the entry on unmap; verify it really is free. */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	/* Non-debug unmap leaves stale PTEs behind, so flush the TLB entry. */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
131 | ||
132 | struct page *kmap_atomic_to_page(const void *ptr) | |
133 | { | |
134 | unsigned long vaddr = (unsigned long)ptr; | |
135 | pte_t *pte; | |
136 | ||
137 | if (vaddr < FIXADDR_START) | |
138 | return virt_to_page(ptr); | |
139 | ||
140 | pte = TOP_PTE(vaddr); | |
141 | return pte_page(*pte); | |
142 | } | |
7e5a69e8 NP |
143 | |
144 | #ifdef CONFIG_CPU_CACHE_VIPT | |
145 | ||
146 | #include <linux/percpu.h> | |
147 | ||
148 | /* | |
149 | * The VIVT cache of a highmem page is always flushed before the page | |
150 | * is unmapped. Hence unmapped highmem pages need no cache maintenance | |
151 | * in that case. | |
152 | * | |
153 | * However unmapped pages may still be cached with a VIPT cache, and | |
154 | * it is not possible to perform cache maintenance on them using physical | |
155 | * addresses unfortunately. So we have no choice but to set up a temporary | |
156 | * virtual mapping for that purpose. | |
157 | * | |
158 | * Yet this VIPT cache maintenance may be triggered from DMA support | |
159 | * functions which are possibly called from interrupt context. As we don't | |
160 | * want to keep interrupt disabled all the time when such maintenance is | |
161 | * taking place, we therefore allow for some reentrancy by preserving and | |
162 | * restoring the previous fixmap entry before the interrupted context is | |
163 | * resumed. If the reentrancy depth is 0 then there is no need to restore | |
164 | * the previous fixmap, and leaving the current one in place allow it to | |
165 | * be reused the next time without a TLB flush (common with DMA). | |
166 | */ | |
167 | ||
168 | static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); | |
169 | ||
/*
 * Map @page into this CPU's dedicated KM_L1_CACHE fixmap slot for VIPT
 * cache maintenance.  The previous contents of the slot's PTE are stored
 * in *@saved_pte so an interrupted user can be restored (see the
 * reentrancy discussion above).  Paired with kunmap_high_l1_vipt().
 */
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu;
	int *depth;
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	/*
	 * In interrupt context preemption is already off; otherwise disable
	 * it so we stay on this CPU while using the per-CPU slot.
	 */
	if (!in_interrupt())
		preempt_disable();

	cpu = smp_processor_id();
	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		/* Slot already maps this page (common with DMA): keep it. */
		*saved_pte = pte;
	} else {
		/* Remember the interrupted mapping, then install ours. */
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}
201 | ||
/*
 * Undo kmap_high_l1_vipt().  If we were nested inside another user of
 * the slot (depth still non-zero) the interrupted mapping recorded in
 * @saved_pte is reinstalled; at depth zero the current mapping is left
 * in place so it can be reused without a TLB flush (see comment above).
 */
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	/* The slot must still hold our mapping and be properly nested. */
	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		/* Restore the mapping the interrupted context was using. */
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}
228 | ||
229 | #endif /* CONFIG_CPU_CACHE_VIPT */ |