/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
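
/*
 * Patch the image's alternative-instruction sites once at boot, the
 * same way the kernel patches its own text.
 */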
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}
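
/*
 * The vdso text is not mapped up front; each page is faulted in from
 * the in-kernel image on first access.
 */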
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}
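
/*
 * If a 32-bit task mremap()s its vdso while sitting in the int80
 * landing pad, the saved IP still points into the old mapping; rewrite
 * it so do_fast_syscall_32 returns into the new one.
 */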
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address - see do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}
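
/*
 * mremap() hook: userspace may move the vdso mapping; keep
 * context.vdso (and any interrupted int80 landing) pointing at it.
 */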
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
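
/*
 * The vvar area exposes shared kernel data (and, for pvclock, the
 * paravirt time page).  Pages are inserted by PFN, since the vma is
 * installed with VM_PFNMAP.
 */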
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(vma,
					    (unsigned long)vmf->virtual_address,
					    __pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
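
/*
 * Map the vvar pages and the vdso text as one contiguous block.
 * sym_vvar_start is negative: the vvar area sits directly below the
 * text, so image->size - sym_vvar_start covers both mappings.
 */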
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;

	static const struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
		.fault = vdso_fault,
		.mremap = vdso_mremap,
	};
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
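	/*
	 * context.vdso records the text base, not the start of the whole
	 * mapping, since sym_vvar_start is negative.
	 */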
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
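
/*
 * The 32-bit vdso skips the stack-relative placement above:
 * calculate_addr is false, so get_unmapped_area() picks the address.
 */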
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}
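
/*
 * Compat tasks may be x32 or ia32; pick the matching vdso image.
 */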
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif
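
/*
 * vgetcpu support: the cpu and node numbers are encoded into the limit
 * of a per-cpu GDT segment so the vdso's getcpu can fetch them with LSL.
 */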
#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
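	/*
	 * Also store the cookie in TSC_AUX; RDTSCP returns it in ECX,
	 * giving userspace a GDT-free way to read cpu and node.
	 */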
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
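
/*
 * Re-run the segment setup on each CPU as it comes online.
 */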
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */