x86: fold pda into percpu area on SMP
arch/x86/include/asm/percpu.h
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_64
extern void load_pda_offset(int cpu);
#else
static inline void load_pda_offset(int cpu) { }
#endif
#endif

#ifdef CONFIG_X86_64
#include <linux/compiler.h>

/* Same as asm-generic/percpu.h, except that we store the per-cpu offset
   in the PDA. Longer term the PDA and every per-cpu variable should
   just be put into a single section and referenced directly from %gs */

#ifdef CONFIG_SMP
#include <asm/pda.h>

#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
#define __my_cpu_offset read_pda(data_offset)

#define per_cpu_offset(x) (__per_cpu_offset(x))

#endif
#include <asm-generic/percpu.h>

DECLARE_PER_CPU(struct x8664_pda, pda);
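
/*
 * Illustrative sketch, not part of the original header: on SMP, the
 * generic per_cpu() from asm-generic/percpu.h resolves a given CPU's
 * copy by adding that CPU's data_offset, fetched from its PDA via the
 * __per_cpu_offset() defined above.  Roughly:
 *
 *	per_cpu(foo, cpu)
 *	  => *RELOC_HIDE(&per_cpu__foo, __per_cpu_offset(cpu))
 *	  => *RELOC_HIDE(&per_cpu__foo, cpu_pda(cpu)->data_offset)
 *
 * where "foo" is a hypothetical DEFINE_PER_CPU(int, foo) variable,
 * used only for illustration.
 */
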
/*
 * These are supposed to be implemented as a single instruction which
 * operates on the per-cpu data base segment. x86-64 doesn't have
 * that yet, so this is a fairly inefficient workaround for the
 * meantime. The single instruction would be atomic with respect to
 * preemption and interrupts, so to get the same effect we would have
 * to disable interrupts here. However, because these macros can be
 * used from within interrupt-disable/enable pairs, we can't actually
 * disable interrupts; disabling preemption is enough.
 */
#define x86_read_percpu(var)					\
({								\
	typeof(per_cpu_var(var)) __tmp;				\
	preempt_disable();					\
	__tmp = __get_cpu_var(var);				\
	preempt_enable();					\
	__tmp;							\
})

#define x86_write_percpu(var, val)				\
do {								\
	preempt_disable();					\
	__get_cpu_var(var) = (val);				\
	preempt_enable();					\
} while (0)
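
/*
 * Usage sketch, not part of the original header, assuming a
 * hypothetical DEFINE_PER_CPU(int, foo): each accessor brackets the
 * access with preempt_disable()/preempt_enable(), so the task cannot
 * be migrated to another CPU mid-access:
 *
 *	int v = x86_read_percpu(foo);
 *	x86_write_percpu(foo, v + 1);
 *
 * The read-modify-write sequence as a whole is still not atomic;
 * preemption may occur between the two calls.
 */
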
#else /* CONFIG_X86_64 */

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)				\
	movl %fs:per_cpu__##this_cpu_off, reg;		\
	lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)	%fs:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)				\
	movl $per_cpu__##var, reg
#define PER_CPU_VAR(var)	per_cpu__##var
#endif /* SMP */
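
/*
 * Illustrative expansion, not part of the original header: on SMP the
 * example above, PER_CPU(cpu_gdt_descr, %ebx), becomes
 *
 *	movl %fs:per_cpu__this_cpu_off, %ebx
 *	lea per_cpu__cpu_gdt_descr(%ebx), %ebx
 *
 * i.e. load this CPU's per-cpu offset, then form the address of this
 * CPU's copy.  On UP it is just "movl $per_cpu__cpu_gdt_descr, %ebx".
 */
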
#else /* ...!ASSEMBLY */

/*
 * In C, a per-cpu variable is accessed through the %fs segment on
 * SMP: %fs base == __per_cpu_offset[cpu], so %fs:per_cpu__<var>
 * reaches this CPU's copy of <var>.  On UP there is a single copy
 * and no segment prefix is used.
 */
#ifdef CONFIG_SMP

#define __my_cpu_offset x86_read_percpu(this_cpu_off)

/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
#define __percpu_seg "%%fs:"

#else  /* !SMP */

#define __percpu_seg ""

#endif	/* SMP */

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);
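
/*
 * Note on percpu_to_op()/percpu_from_op() below: the "if (0)" block is
 * never executed; it only makes the compiler type-check that 'val' is
 * assignable to the variable's type before the cast in the asm
 * operand.  The sizeof() switch selects the byte/word/long form of the
 * instruction, and __bad_percpu_size() is deliberately left without a
 * definition so that unsupported sizes fail at link time.
 */
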
#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) T__;			\
	if (0) {					\
		T__ tmp__;				\
		tmp__ = (val);				\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_seg"%0"		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_seg"%0"		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_seg"%0"		\
		    : "+m" (var)			\
		    : "ri" ((T__)val));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)

#define percpu_from_op(op, var)				\
({							\
	typeof(var) ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_seg"%1,%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_seg"%1,%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_seg"%1,%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	ret__;						\
})
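
/*
 * Illustrative expansion, not part of the original header: for a
 * hypothetical 4-byte per-cpu variable per_cpu__foo on SMP,
 * percpu_from_op("mov", per_cpu__foo) compiles down to a single
 * instruction (modulo register allocation):
 *
 *	movl %fs:per_cpu__foo, %eax
 *
 * Being one instruction, the access is atomic with respect to
 * preemption and interrupts, so no preempt_disable() bracketing is
 * needed here, unlike the x86-64 workaround earlier in this file.
 */
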
#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
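
/*
 * Usage sketch, not part of the original header, assuming a
 * hypothetical DEFINE_PER_CPU(unsigned long, foo):
 *
 *	x86_write_percpu(foo, 0);	// movl $0, %fs:per_cpu__foo
 *	x86_add_percpu(foo, 1);		// addl $1, %fs:per_cpu__foo
 *	unsigned long v = x86_read_percpu(foo);
 *
 * Each op is a single instruction, but as noted above none of them
 * yields an lvalue, so "x86_read_percpu(foo) = 1" will not compile.
 */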
#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_X86_64 */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
	  &early_per_cpu_ptr(_name)[_cpu] :			\
	  &per_cpu(_name, _cpu))
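
/*
 * Usage sketch, not part of the original header: a variable needed
 * before the per-cpu areas are set up is defined once, then always
 * read through early_per_cpu(), which falls back to the __initdata
 * map until _name##_early_ptr is cleared after per-cpu init.  For
 * example (x86_cpu_to_apicid is believed to be a real user of these
 * macros; shown here only as an illustration):
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *	...
 *	u16 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 */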

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */