/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#include <linux/stringify.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define XCHAL_MMU_ASID_BITS	8

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

extern unsigned long asid_cache;

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
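/*
 * The RASID special register holds one 8-bit ASID per protection ring.
 * Judging by the 0x03020001 constant, ASID_INSERT() places the user
 * ASID in the ring-1 byte and keeps the fixed values 1, 2 and 3 (the
 * kernel and reserved ASIDs listed above) for rings 0, 2 and 3.
 */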
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))

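/*
 * Write a complete four-ring ASID word to RASID; the isync following
 * the wsr ensures the new ASIDs take effect before any subsequent
 * memory access.
 */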
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}

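/*
 * asid_cache is a global counter: the low XCHAL_MMU_ASID_BITS hold the
 * ASID most recently handed out, and the bits above them act as a
 * generation number.  When the low bits wrap to zero, every TLB entry
 * is flushed and the cycle restarts at ASID_USER_FIRST, so a stale
 * context can be recognized by its older generation (see switch_mm()).
 */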
static inline void
__get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	if (!(++asid_cache & ASID_MASK)) {
		flush_tlb_all(); /* start new asid cycle */
		asid_cache += ASID_USER_FIRST;
	}
	mm->context = asid_cache;
}

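/*
 * Make mm's ASID current and drop the cached page-directory
 * translation so that page-table walks use the new context.
 */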
static inline void
__load_mmu_context(struct mm_struct *mm)
{
	set_rasid_register(ASID_INSERT(mm->context));
	invalidate_page_directory();
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */

	__get_new_mmu_context(next);
	__load_mmu_context(next);
}

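/*
 * The XOR below compares the generation bits (everything above
 * ASID_MASK) of the mm's context with the current asid_cache; a
 * mismatch means the ASID predates the last wrap-around and is no
 * longer valid, so a fresh one is allocated first.
 */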
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */

	if (next->context == NO_CONTEXT || ((next->context ^ asid) & ~ASID_MASK))
		__get_new_mmu_context(next);

	__load_mmu_context(next);
}

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */

}

#endif /* _XTENSA_MMU_CONTEXT_H */