#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a region
 * id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0	/* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))
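/*
 * For illustration: with context number 7 and an address whose top three
 * bits select region 3, ia64_rid(7, addr) yields (7 << 3) | 3 == 0x3b.
 * Each of a process's eight 2^61-byte regions thus gets its own region id.
 */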

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

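/*
 * enter_lazy_tlb() is a no-op on ia-64: nothing needs to be done when a
 * task enters lazy-TLB mode, hence the empty body below.
 */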
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused.  This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below.  Called by activate_mm().  <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			/* re-check with the lock held: the flag is set under
			 * ia64_ctx.lock when the context counter wraps */
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

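/*
 * Look up (and, on first use, allocate) the context number for @mm.  The
 * common case returns the existing context without taking ia64_ctx.lock.
 */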
static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
		if (ia64_ctx.next >= ia64_ctx.limit) {
			/* scan the bitmap for the next free range of rids */
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

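/*
 * Load region registers rr0..rr4 for the given context.  Each ia-64
 * region register packs the VHPT enable bit (bit 0), the preferred page
 * size (bits 2-7), and the region id (bits 8-31), which is why rr0 below
 * is assembled as (rid << 8) | (PAGE_SHIFT << 2) | 1.
 */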
static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	/* keep the page-size bits (2-7) that rr4, the huge-page region,
	 * already carries */
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

# if RGN_HPAGE != 4
# error "reload_context assumes RGN_HPAGE is 4"
# endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();	/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption disabled: both smp_processor_id() and
 * the delayed TLB flush assume we stay on this CPU.
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

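/*
 * On ia-64 switching address spaces and activating one for the first time
 * are the same operation, so switch_mm() simply reuses activate_mm();
 * the PREV argument is unused.
 */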
#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */