ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on ASID-capable CPUs
arch/arm/mm/context.c
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
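/*
 * Per-CPU record of the mm most recently installed on each CPU. It is
 * read by reset_context() (the rollover IPI handler below) so that a
 * CPU can be moved to a fresh ASID even while mm_cpumask() and
 * "current" may be stale mid-switch. The writer is assumed to be the
 * context-switch path in <asm/mmu_context.h>; it is not shown in this
 * file.
 */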
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif

#ifdef CONFIG_ARM_LPAE
void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

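/*
 * For reference, a sketch of the ASID layout this file relies on, as
 * usually defined in <asm/mmu_context.h> of this era (quoted here as an
 * assumption, not as part of this file):
 *
 *	#define ASID_BITS		8
 *	#define ASID_MASK		((~0) << ASID_BITS)
 *	#define ASID_FIRST_VERSION	(1 << ASID_BITS)
 *
 * mm->context.id thus holds a generation ("version") number in the
 * upper bits and the hardware ASID in the low ASID_BITS bits, so
 * (mm->context.id ^ cpu_last_asid) >> ASID_BITS is non-zero exactly
 * when the mm still carries an ASID from an older generation.
 */
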
/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}

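/*
 * __init_new_context() runs at fork/exec time via init_new_context(),
 * which in the ASID-capable configuration is assumed to be a trivial
 * wrapper in <asm/mmu_context.h>, roughly:
 *
 *	#define init_new_context(tsk, mm)	(__init_new_context(tsk, mm), 0)
 *
 * (a sketch of the expected caller, not code from this file). An id of
 * 0 can never match the current generation, since cpu_last_asid starts
 * at ASID_FIRST_VERSION, so the first switch to this mm allocates a
 * real ASID.
 */
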
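/*
 * Invalidate all translations tagged with ASIDs from the old
 * generation: switch TTBR0 to page tables containing only global
 * mappings, then flush the local TLB and, for VIVT ASID-tagged
 * instruction caches, the I-cache. Called on the CPU that starts a
 * rollover in __new_context() and on every other CPU from the
 * reset_context() IPI.
 */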
static void flush_context(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (!mm)
		return;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}
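
/*
 * Note on the "cpu_last_asid + cpu + 1" choice above: during a
 * rollover every CPU (the initiator in __new_context() and the others
 * here via IPI) derives its new ASID from the same cpu_last_asid value
 * plus its own CPU number, so the results are distinct per CPU;
 * __new_context() then advances cpu_last_asid by NR_CPUS to reserve
 * the whole block before normal allocation resumes.
 */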

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}
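
/*
 * Worked example of the rollover path, assuming ASID_BITS == 8
 * (so ASID_FIRST_VERSION == 0x100) and NR_CPUS == 4 -- the numbers are
 * illustrative only:
 *
 *	cpu_last_asid == 0x2ff and CPU2 calls __new_context()
 *	asid = ++cpu_last_asid	-> 0x300, low 8 bits are 0: rollover
 *	CPU2 takes 0x300 + 2 + 1 = 0x303
 *	reset_context() gives CPU0 0x301, CPU1 0x302, CPU3 0x304
 *	cpu_last_asid += NR_CPUS -> 0x304
 *
 * The next ordinary allocation hands out 0x305, and any mm still
 * tagged with a 0x1xx or 0x2xx id is recognised as belonging to an old
 * generation and receives a new ASID before it runs again.
 */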