#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP		EFI_ARCH_1

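/*
 * A minimal sketch of how the chicken bit is consumed (assuming an
 * efi_memory_desc_t *md taken from the EFI memory map); conceptually the
 * 64-bit mapping code falls back to the legacy path when the flag is set:
 *
 *	if (efi_enabled(EFI_OLD_MEMMAP))
 *		old_map_region(md);	(legacy ioremap-style 1:1 mapping)
 *	else
 *		efi_map_region(md);	(stable VA mapping described above)
 *
 * Both helpers are declared further down in this header.
 */
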
#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define MAX_CMDLINE_ADDRESS	UINT_MAX

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

#ifdef CONFIG_X86_32

extern unsigned long asmlinkage efi_call_phys(void *, ...);

#define arch_efi_call_virt_setup()	kernel_fpu_begin()
#define arch_efi_call_virt_teardown()	kernel_fpu_end()

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */
#define arch_efi_call_virt(f, args...)                                  \
({                                                                      \
        ((efi_##f##_t __attribute__((regparm(0)))*)                     \
                efi.systab->runtime->f)(args);                          \
})
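
/*
 * For illustration (a sketch, not generated code): a call such as
 *
 *	status = arch_efi_call_virt(get_time, tm, tc);
 *
 * expands to roughly
 *
 *	status = ((efi_get_time_t __attribute__((regparm(0))) *)
 *			efi.systab->runtime->get_time)(tm, tc);
 *
 * so the firmware entry point is always reached with the standard
 * stack-based calling convention, even though the 32-bit kernel itself
 * is built with -mregparm=3.
 */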

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern u64 asmlinkage efi_call(void *fp, ...);

#define efi_call_phys(f, args...)	efi_call((f), args)

/*
 * Scratch space used for switching the pagetable in the EFI stub
 */
struct efi_scratch {
        u64	r15;
        u64	prev_cr3;
        pgd_t	*efi_pgt;
        bool	use_pgd;
        u64	phys_stack;
} __packed;

#define arch_efi_call_virt_setup()                                      \
({                                                                      \
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
                                                                        \
        if (efi_scratch.use_pgd) {                                      \
                efi_scratch.prev_cr3 = read_cr3();                      \
                write_cr3((unsigned long)efi_scratch.efi_pgt);          \
                __flush_tlb_all();                                      \
        }                                                               \
})

#define arch_efi_call_virt(f, args...)                                  \
        efi_call((void *)efi.systab->runtime->f, args)                  \

#define arch_efi_call_virt_teardown()                                   \
({                                                                      \
        if (efi_scratch.use_pgd) {                                      \
                write_cr3(efi_scratch.prev_cr3);                        \
                __flush_tlb_all();                                      \
        }                                                               \
                                                                        \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
})
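
/*
 * The generic EFI runtime wrappers pair these hooks around every runtime
 * service invocation, roughly (a sketch, not the exact wrapper code):
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(get_time, tm, tc);
 *	arch_efi_call_virt_teardown();
 *
 * so the switch to the dedicated EFI page table (when efi_scratch.use_pgd
 * is set) and the FPU context save/restore always bracket the firmware
 * call, and prev_cr3 is restored on the way out.
 */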

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset. The __memset function is
 * present only in the kernel binary. Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset(), so we should use the standard
 * memset from arch/x86/boot/compressed/string.c. The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif
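
/*
 * Sketch of the redefinition being undone above: with KASAN enabled the
 * x86 string headers effectively do something like
 *
 *	#define memset(s, c, n) __memset(s, c, n)
 *
 * and __memset() only exists in the kernel image proper, not in the
 * separately linked stub, so the #undefs restore the plain names resolved
 * by arch/x86/boot/compressed/string.c.
 */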

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_print_memmap(void);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);

struct efi_setup_data {
        u64 fw_vendor;
        u64 runtime;
        u64 tables;
        u64 smbios;
        u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
        return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
        if (efi_is_native())
                return true;

        if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
                return true;

        return false;
}

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
        return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */


/* arch specific definitions used by the stub code */

struct efi_config {
        u64 image_handle;
        u64 table;
        u64 allocate_pool;
        u64 allocate_pages;
        u64 get_memory_map;
        u64 free_pool;
        u64 free_pages;
        u64 locate_handle;
        u64 handle_protocol;
        u64 exit_boot_services;
        u64 text_output;
        efi_status_t (*call)(unsigned long, ...);
        bool is64;
} __packed;

__pure const struct efi_config *__efi_early(void);

#define efi_call_early(f, ...)                                          \
        __efi_early()->call(__efi_early()->f, __VA_ARGS__);

#define __efi_call_early(f, ...)                                        \
        __efi_early()->call((unsigned long)f, __VA_ARGS__);

#define efi_is_64bit()		__efi_early()->is64

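/*
 * A minimal usage sketch (illustrative values only): the stub issues boot
 * services calls through the table above, e.g.
 *
 *	efi_status_t status;
 *	void *buf;
 *
 *	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 *				size, (void **)&buf);
 *	if (status != EFI_SUCCESS)
 *		return status;
 *
 * The ->call() helper filled in by the boot stub dispatches with the
 * calling convention matching the firmware bitness, so the same stub
 * source can run on both native 64-bit and mixed-mode firmware.
 */
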
extern bool efi_reboot_required(void);

#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
        return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */