/* arch/x86/kernel/setup_percpu.c */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

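/*
 * On 64-bit, percpu symbols are zero-based and the boot CPU initially
 * runs on the startup percpu area linked at __per_cpu_load, so its
 * boot-time offset is __per_cpu_load.  On 32-bit, percpu symbols live
 * at their link addresses and the boot offset is simply zero.
 */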
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET	((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET	0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

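/*
 * per_cpu_offset(x) resolves to __per_cpu_offset[x]; the entries start
 * out pointing at the boot percpu area and are rewritten in
 * setup_per_cpu_areas() once the real first chunk is allocated.
 */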
unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk, which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere; there is no need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: allocation size in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
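	/*
	 * The goal steers bootmem toward memory above the ISA DMA zone
	 * (MAX_DMA_ADDRESS), presumably so early DMA-able memory is not
	 * exhausted by percpu areas.
	 */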
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

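/*
 * pcpu_page_first_chunk() maps the first chunk into the vmalloc area
 * one page at a time; this callback makes sure the page tables backing
 * each mapped address are populated.
 */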
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

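/*
 * On 32-bit, percpu data is addressed through the %fs segment, so each
 * CPU gets a GDT entry whose base is its percpu offset.  64-bit uses
 * MSR_GS_BASE directly and needs no segment descriptor.
 */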
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate the percpu area.  The embedding allocator is our
	 * favorite; however, on NUMA configurations it can result in a
	 * very sparse unit mapping, and the vmalloc area isn't spacious
	 * enough on 32-bit.  Use the page allocator in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64-bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  In the future this
		 * can also allow using PMD mappings in the vmalloc area.
		 * Use PAGE_SIZE on 32-bit, as vmalloc space is highly
		 * contended there and large vmalloc allocations can
		 * easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

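	/*
	 * pcpu_base_addr is where the allocator actually placed the
	 * first chunk; delta rebases link-time percpu addresses
	 * (relative to __per_cpu_start) onto it, and pcpu_unit_offsets[]
	 * adds each CPU's unit offset within the chunk.
	 */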
	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE;
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct even when
		 * the boot cpu is on a node that has no memory installed.
		 * Also, when MEMORY_HOTPLUG is defined, cpu_up() calls
		 * cpu_to_node() for the APs before per_cpu(numa_node) is
		 * set up later via c_init (i.e. intel_init/amd_init).
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .init.data area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}