#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
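
/*
 * For example: with context number 5, an address in region 2
 * (i.e. addr >> 61 == 2) yields ia64_rid(5, addr) == (5 << 3) | 2 == 42,
 * so the five user regions 0..4 of that context occupy the consecutive
 * rids 40..44 (see reload_context() below).
 */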

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};
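
/*
 * Invariant (as maintained by get_mmu_context() below): context numbers
 * in [next, limit) are a run of clear, i.e. free, bits in bitmap; once
 * next catches up with limit, the bitmap is rescanned for a new free run.
 */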

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below. Called by activate_mm(). <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
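		/* re-check the flag now that we hold the lock: */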
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpus_clear(mm->cpu_vm_mask);
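		/* free run exhausted: scan bitmap for the next run of clear bits */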
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
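	/*
	 * Illustrative arithmetic (assuming PAGE_SHIFT == 14, i.e. 16KB
	 * pages): context 1 gives rid = 1 << 3 = 8, so
	 * rr0 = (8 << 8) | (14 << 2) | 1 = 0x839, and rr1..rr4 step the
	 * rid field by rid_incr (1 << 8) to use rids 9..12.
	 */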
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

# if RGN_HPAGE != 4
#  error "reload_context assumes RGN_HPAGE is 4"
# endif
#endif

	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
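
/*
 * As the #define above suggests, switching and activating are the same
 * operation here: reload_context() installs the next mm's region ids
 * outright, and (as destroy_context() and deactivate_mm() show) no state
 * from the previous mm needs saving or teardown.
 */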

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */