context_tracking: Add stub context_tracking_is_enabled
kernel/context_tracking.c
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

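/*
 * Note: context_tracking_is_enabled(), used below, is defined in
 * <linux/context_tracking_state.h>. A minimal sketch of its expected
 * shape, assuming the static-key fast path of this kernel era:
 *
 *	static inline bool context_tracking_is_enabled(void)
 *	{
 *		return static_key_false(&context_tracking_enabled);
 *	}
 *
 * static_key_false() compiles down to a patched no-op branch, so the
 * check is nearly free until context_tracking_cpu_set() below enables
 * the key.
 */
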
/**
 * context_tracking_cpu_set - enable context tracking on a CPU
 * @cpu: the CPU to track
 *
 * Mark @cpu as active for context tracking and, if it wasn't already,
 * take a reference on the global static key so the probes start firing.
 */
void context_tracking_cpu_set(int cpu)
{
	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_key_slow_inc(&context_tracking_enabled);
	}
}

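/*
 * Usage sketch (illustrative, not part of this file): the NO_HZ_FULL
 * setup code is expected to enable tracking for each full-dynticks CPU,
 * roughly:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		context_tracking_cpu_set(cpu);
 */
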
/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed that the remaining kernel
 * instructions to execute won't use any RCU read side critical section,
 * because this function sets RCU in extended quiescent state.
 */
void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Repeat the user_enter() check here because some archs may be calling
	 * this from asm and if no CPU needs context tracking, they shouldn't
	 * go further. Repeat the check here until they support the inline static
	 * key check.
	 */
	if (!context_tracking_is_enabled())
		return;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to this nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			trace_user_enter(0);
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			vtime_user_enter(current);
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency with those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);

void context_tracking_user_enter(void)
{
	context_tracking_enter(CONTEXT_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_enter);

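/*
 * Usage sketch, assuming a hypothetical arch syscall slow path (the
 * function names below are made up for illustration). The user_exit()
 * and user_enter() wrappers in <linux/context_tracking.h> funnel into
 * context_tracking_user_exit()/context_tracking_user_enter() when the
 * static key is enabled:
 *
 *	asmlinkage void example_syscall_trace_enter(struct pt_regs *regs)
 *	{
 *		user_exit();
 *		... trace/audit the syscall, RCU is usable again ...
 *	}
 *
 *	asmlinkage void example_syscall_trace_leave(struct pt_regs *regs)
 *	{
 *		... last kernel work ...
 *		user_enter();
 *	}
 */
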
/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of an RCU read side critical section. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc.
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	if (!context_tracking_is_enabled())
		return;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			vtime_user_exit(current);
			trace_user_exit(0);
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);

void context_tracking_user_exit(void)
{
	context_tracking_exit(CONTEXT_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_exit);

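/*
 * For reference: the exception_enter()/exception_exit() pair mentioned
 * in context_tracking_enter() builds on the two functions above. A
 * sketch of the expected call pattern, assuming the inline helpers of
 * <linux/context_tracking.h> in this era:
 *
 *	enum ctx_state prev_ctx;
 *
 *	prev_ctx = exception_enter();
 *	... handle the exception, RCU may be used here ...
 *	exception_exit(prev_ctx);
 *
 * exception_enter() saves this CPU's context_tracking.state and calls
 * context_tracking_exit() on it; exception_exit() re-enters the saved
 * state via context_tracking_enter(). That hand-off of prev_ctx is what
 * makes re-entrancy and cross-CPU migration (see the comment inside
 * context_tracking_enter()) work.
 */
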
/**
 * __context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user-kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void __context_tracking_task_switch(struct task_struct *prev,
				    struct task_struct *next)
{
	clear_tsk_thread_flag(prev, TIF_NOHZ);
	set_tsk_thread_flag(next, TIF_NOHZ);
}

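/*
 * Usage sketch: the scheduler is expected to invoke the static-key
 * guarded wrapper from <linux/context_tracking.h> on every context
 * switch, roughly:
 *
 *	static inline void context_tracking_task_switch(struct task_struct *prev,
 *							struct task_struct *next)
 *	{
 *		if (context_tracking_is_enabled())
 *			__context_tracking_task_switch(prev, next);
 *	}
 *
 * so TIF_NOHZ follows the running task without taxing kernels that
 * never enable context tracking.
 */
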
/*
 * CONFIG_CONTEXT_TRACKING_FORCE enables context tracking on all CPUs at
 * boot, even without a nohz_full= setup; it exists mainly to exercise
 * this subsystem on ordinary machines.
 */
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif