#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/cache.h>	/* for flush_user_dcache_range_asm() proto */

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

/* On SMP, any CPU may hold stale lines for this mm, so tearing it down
 * must flush every CPU's caches (via on_each_cpu() below); on UP the
 * local flush suffices. */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))

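/* Note: the macro body deliberately has no trailing semicolon, so the
 * macro can be used as an ordinary statement.  With a semicolon there,
 * a caller such as the following (illustrative) would fail to compile,
 * because the extra ';' terminates the if before the else:
 *
 *	if (size)
 *		flush_kernel_dcache_range(start, size);
 *	else
 *		do_something_else();
 */
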
extern void flush_cache_all_local(void);

/* Trivial wrapper so flush_cache_all_local(), which takes no arguments,
 * can be used as an on_each_cpu() callback. */
static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

static inline void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()

extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);

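/* The two helpers below use parisc_cache_flush_threshold (calibrated at
 * boot by parisc_setup_cache_timing()) to decide between a line-by-line
 * ranged flush and flushing the whole cache: at or above the threshold,
 * flushing everything is assumed to be cheaper. */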
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page_address(page));	\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s,e)	do {			\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)

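/* Typical use of flush_icache_range() (illustrative): after the kernel
 * writes instructions into memory, e.g. when loading a module, the new
 * code must be pushed out of the d-cache and invalidated from the
 * i-cache before it is executed:
 *
 *	memcpy(dest, code, size);
 *	flush_icache_range((unsigned long)dest, (unsigned long)dest + size);
 */
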
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

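/* Sketch of a typical copy_to_user_page() caller, modelled on
 * access_process_vm() (the surrounding variables are assumptions, not
 * part of this header): a debugger poking another task's memory writes
 * through a kernel-space alias of the page, so the user-space alias
 * must be made coherent afterwards:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 */
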
/* If the vma belongs to the currently running process (its space id is
 * live in %sr3), a ranged flush of its user addresses is possible;
 * otherwise we cannot reach them and must flush everything. */
static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pmd_t *pmd;
	pte_t pte;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* We cannot take the pte lock here: flush_cache_page is usually
	 * called with pte lock already held.  Whereas flush_dcache_page
	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
	 * the vma itself is secure, but the pte might come or go racily.
	 */
	pte = *pte_offset_map(pmd, addr);
	/* But pte_unmap() does nothing on this architecture */

	/* Filter out coincidental file entries and swap entries */
	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
		return 0;

	return pte_pfn(pte) == pfn;
}

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access TLB miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
		unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they may not
	 * touch user space, but we can't be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr, pfn)))
		__flush_cache_page(vma, vmaddr);
}

#endif /* _PARISC_CACHEFLUSH_H */