/*
 * include/asm-s390/tlbflush.h
 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

/*
 * S/390 has three ways of flushing TLBs
 * 'ptlb' does a flush of the local processor
 * 'csp' flushes the TLBs on all PUs of a SMP
 * 'ipte' invalidates a pte in a page table and flushes that out of
 * the TLBs of all PUs of a SMP
 */

/*
 * Flush the TLB of the local CPU only ("ptlb" = purge TLB).
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement in if/else bodies; "memory" clobber prevents the compiler
 * from caching memory contents across the flush.
 */
#define local_flush_tlb() \
	do { asm volatile("ptlb" : : : "memory"); } while (0)

31 | #ifndef CONFIG_SMP | |
32 | ||
33 | /* | |
34 | * We always need to flush, since s390 does not flush tlb | |
35 | * on each context switch | |
36 | */ | |
37 | ||
38 | static inline void flush_tlb(void) | |
39 | { | |
40 | local_flush_tlb(); | |
41 | } | |
42 | static inline void flush_tlb_all(void) | |
43 | { | |
44 | local_flush_tlb(); | |
45 | } | |
46 | static inline void flush_tlb_mm(struct mm_struct *mm) | |
47 | { | |
48 | local_flush_tlb(); | |
49 | } | |
50 | static inline void flush_tlb_page(struct vm_area_struct *vma, | |
51 | unsigned long addr) | |
52 | { | |
53 | local_flush_tlb(); | |
54 | } | |
55 | static inline void flush_tlb_range(struct vm_area_struct *vma, | |
56 | unsigned long start, unsigned long end) | |
57 | { | |
58 | local_flush_tlb(); | |
59 | } | |
60 | ||
61 | #define flush_tlb_kernel_range(start, end) \ | |
62 | local_flush_tlb(); | |
63 | ||
64 | #else | |
65 | ||
66 | #include <asm/smp.h> | |
67 | ||
68 | extern void smp_ptlb_all(void); | |
69 | ||
70 | static inline void global_flush_tlb(void) | |
71 | { | |
94c12cc7 MS |
72 | register unsigned long reg2 asm("2"); |
73 | register unsigned long reg3 asm("3"); | |
74 | register unsigned long reg4 asm("4"); | |
75 | long dummy; | |
76 | ||
1da177e4 LT |
77 | #ifndef __s390x__ |
78 | if (!MACHINE_HAS_CSP) { | |
79 | smp_ptlb_all(); | |
80 | return; | |
81 | } | |
82 | #endif /* __s390x__ */ | |
94c12cc7 MS |
83 | |
84 | dummy = 0; | |
85 | reg2 = reg3 = 0; | |
86 | reg4 = ((unsigned long) &dummy) + 1; | |
87 | asm volatile( | |
88 | " csp %0,%2" | |
89 | : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); | |
1da177e4 LT |
90 | } |
91 | ||
92 | /* | |
93 | * We only have to do global flush of tlb if process run since last | |
94 | * flush on any other pu than current. | |
95 | * If we have threads (mm->count > 1) we always do a global flush, | |
96 | * since the process runs on more than one processor at the same time. | |
97 | */ | |
98 | ||
99 | static inline void __flush_tlb_mm(struct mm_struct * mm) | |
100 | { | |
101 | cpumask_t local_cpumask; | |
102 | ||
103 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) | |
104 | return; | |
105 | if (MACHINE_HAS_IDTE) { | |
c1821c2e GS |
106 | pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd); |
107 | ||
108 | if (shadow_pgd) { | |
109 | asm volatile( | |
110 | " .insn rrf,0xb98e0000,0,%0,%1,0" | |
111 | : : "a" (2048), | |
112 | "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" ); | |
113 | } | |
94c12cc7 MS |
114 | asm volatile( |
115 | " .insn rrf,0xb98e0000,0,%0,%1,0" | |
116 | : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); | |
1da177e4 LT |
117 | return; |
118 | } | |
119 | preempt_disable(); | |
120 | local_cpumask = cpumask_of_cpu(smp_processor_id()); | |
121 | if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) | |
122 | local_flush_tlb(); | |
123 | else | |
124 | global_flush_tlb(); | |
125 | preempt_enable(); | |
126 | } | |
127 | ||
128 | static inline void flush_tlb(void) | |
129 | { | |
130 | __flush_tlb_mm(current->mm); | |
131 | } | |
132 | static inline void flush_tlb_all(void) | |
133 | { | |
134 | global_flush_tlb(); | |
135 | } | |
136 | static inline void flush_tlb_mm(struct mm_struct *mm) | |
137 | { | |
138 | __flush_tlb_mm(mm); | |
139 | } | |
140 | static inline void flush_tlb_page(struct vm_area_struct *vma, | |
141 | unsigned long addr) | |
142 | { | |
143 | __flush_tlb_mm(vma->vm_mm); | |
144 | } | |
145 | static inline void flush_tlb_range(struct vm_area_struct *vma, | |
146 | unsigned long start, unsigned long end) | |
147 | { | |
148 | __flush_tlb_mm(vma->vm_mm); | |
149 | } | |
150 | ||
151 | #define flush_tlb_kernel_range(start, end) global_flush_tlb() | |
152 | ||
153 | #endif | |
154 | ||
155 | static inline void flush_tlb_pgtables(struct mm_struct *mm, | |
156 | unsigned long start, unsigned long end) | |
157 | { | |
158 | /* S/390 does not keep any page table caches in TLB */ | |
159 | } | |
160 | ||
161 | #endif /* _S390_TLBFLUSH_H */ |