arm/xen: Get event-channel irq through HVM_PARAM when booting with ACPI
[deliverable/linux.git] / arch/arm/xen/enlighten.c
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/pvclock_gtod.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>
#include <linux/timekeeper_internal.h>
#include <linux/acpi.h>

#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

static __read_mostly unsigned int xen_events_irq;

static __initdata struct device_node *xen_node;

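/*
 * Foreign-page mapping helpers. ARM guests are auto-translated, so the
 * array variant defers to the generic xen_xlate_* implementation, while
 * the legacy single-range variant is not supported here.
 */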
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid,
                               struct page **pages)
{
        return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
                                         prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);

/* Not used by XENFEAT_auto_translated guests. */
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
{
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
{
        return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);

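/*
 * Steal-time accounting, wired up as pv_time_ops.steal_clock below:
 * report the time this vcpu spent runnable (waiting for a physical CPU)
 * or offline, taken from the Xen runstate snapshot.
 */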
static unsigned long long xen_stolen_accounting(int cpu)
{
        struct vcpu_runstate_info state;

        BUG_ON(cpu != smp_processor_id());

        xen_get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
}

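/*
 * Read the wall-clock time published by Xen in the shared info page.
 * The version field acts as a sequence counter (odd while an update is
 * in progress), so retry until a consistent snapshot is read, then add
 * the time elapsed since boot to obtain the current wall-clock time.
 */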
static void xen_read_wallclock(struct timespec64 *ts)
{
        u32 version;
        struct timespec64 now, ts_monotonic;
        struct shared_info *s = HYPERVISOR_shared_info;
        struct pvclock_wall_clock *wall_clock = &(s->wc);

        /* get wallclock at system boot */
        do {
                version = wall_clock->version;
                rmb();          /* fetch version before time */
                now.tv_sec = ((uint64_t)wall_clock->sec_hi << 32) | wall_clock->sec;
                now.tv_nsec = wall_clock->nsec;
                rmb();          /* fetch time before checking version */
        } while ((wall_clock->version & 1) || (version != wall_clock->version));

        /* time since system boot */
        ktime_get_ts64(&ts_monotonic);
        *ts = timespec64_add(now, ts_monotonic);
}

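/*
 * Registered only in the initial domain (see xen_guest_init): push the
 * kernel's wall-clock time to Xen via XENPF_settime64 whenever the clock
 * is set, and otherwise at most once every 11 minutes, mirroring the
 * sync_cmos_clock() RTC drift compensation.
 */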
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
                                   unsigned long was_set, void *priv)
{
        /* Protected by the calling core code serialization */
        static struct timespec64 next_sync;

        struct xen_platform_op op;
        struct timespec64 now, system_time;
        struct timekeeper *tk = priv;

        now.tv_sec = tk->xtime_sec;
        now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        system_time = timespec64_add(now, tk->wall_to_monotonic);

        /*
         * We only take the expensive HV call when the clock was set
         * or when the 11 minutes RTC synchronization time elapsed.
         */
        if (!was_set && timespec64_compare(&now, &next_sync) < 0)
                return NOTIFY_OK;

        op.cmd = XENPF_settime64;
        op.u.settime64.mbz = 0;
        op.u.settime64.secs = now.tv_sec;
        op.u.settime64.nsecs = now.tv_nsec;
        op.u.settime64.system_time = timespec64_to_ns(&system_time);
        (void)HYPERVISOR_platform_op(&op);

        /*
         * Move the next drift compensation time 11 minutes
         * ahead. That's emulating the sync_cmos_clock() update for
         * the hardware RTC.
         */
        next_sync = now;
        next_sync.tv_sec += 11 * 60;

        return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
        .notifier_call = xen_pvclock_gtod_notify,
};

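/*
 * Per-CPU setup: register this CPU's vcpu_info page with the hypervisor
 * and unmask the event-channel PPI. Called on the boot CPU from
 * xen_guest_init() and on secondary CPUs from the CPU_STARTING notifier.
 */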
static void xen_percpu_init(void)
{
        struct vcpu_register_vcpu_info info;
        struct vcpu_info *vcpup;
        int err;
        int cpu = get_cpu();

        /*
         * VCPUOP_register_vcpu_info cannot be called twice for the same
         * vcpu, so if vcpu_info is already registered, just get out. This
         * can happen with cpu-hotplug.
         */
        if (per_cpu(xen_vcpu, cpu) != NULL)
                goto after_register_vcpu_info;

        pr_info("Xen: initializing cpu%d\n", cpu);
        vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

        info.mfn = virt_to_gfn(vcpup);
        info.offset = xen_offset_in_page(vcpup);

        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
        BUG_ON(err);
        per_cpu(xen_vcpu, cpu) = vcpup;

        xen_setup_runstate_info(cpu);

after_register_vcpu_info:
        enable_percpu_irq(xen_events_irq, 0);
        put_cpu();
}

static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
        int rc;
        rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
        BUG_ON(rc);
}

static void xen_power_off(void)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
        int rc;
        rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
        BUG_ON(rc);
}

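/*
 * CPU hotplug notifier: run the per-CPU Xen setup as each CPU starts,
 * and mask the event-channel PPI again when a CPU is taken down.
 */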
static int xen_cpu_notification(struct notifier_block *self,
                                unsigned long action,
                                void *hcpu)
{
        switch (action) {
        case CPU_STARTING:
                xen_percpu_init();
                break;
        case CPU_DYING:
                disable_percpu_irq(xen_events_irq);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block xen_cpu_notifier = {
        .notifier_call = xen_cpu_notification,
};

static irqreturn_t xen_arm_callback(int irq, void *arg)
{
        xen_hvm_evtchn_do_upcall();
        return IRQ_HANDLED;
}

/*
 * see Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
#define GRANT_TABLE_PHYSADDR 0
void __init xen_early_init(void)
{
        int len;
        const char *s = NULL;
        const char *version = NULL;
        const char *xen_prefix = "xen,xen-";

        xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
        if (!xen_node) {
                pr_debug("No Xen support\n");
                return;
        }
        s = of_get_property(xen_node, "compatible", &len);
        if (strlen(xen_prefix) + 3 < len &&
            !strncmp(xen_prefix, s, strlen(xen_prefix)))
                version = s + strlen(xen_prefix);
        if (version == NULL) {
                pr_debug("Xen version not found\n");
                return;
        }

        pr_info("Xen %s support found\n", version);

        xen_domain_type = XEN_HVM_DOMAIN;

        xen_setup_features();

        if (xen_feature(XENFEAT_dom0))
                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
        else
                xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

        if (!console_set_on_cmdline && !xen_initial_domain())
                add_preferred_console("hvc", 0, NULL);
}

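/*
 * When booting with ACPI there is no Xen device-tree node describing the
 * event-channel interrupt, so it is fetched from the hypervisor through
 * the HVM_PARAM_CALLBACK_IRQ parameter instead. As decoded below, bits
 * 63:56 of the parameter carry the delivery type (only the per-processor
 * interrupt type, HVM_PARAM_CALLBACK_TYPE_PPI, is handled), bits 7:0 the
 * interrupt number, and bits 9:8 the trigger and polarity flags, which
 * are translated into an ACPI GSI registration.
 */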
static void __init xen_acpi_guest_init(void)
{
#ifdef CONFIG_ACPI
        struct xen_hvm_param a;
        int interrupt, trigger, polarity;

        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;

        if (HYPERVISOR_hvm_op(HVMOP_get_param, &a)
            || (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) {
                xen_events_irq = 0;
                return;
        }

        interrupt = a.value & 0xff;
        trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE
                                         : ACPI_LEVEL_SENSITIVE;
        polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW
                                          : ACPI_ACTIVE_HIGH;
        xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity);
#endif
}

static void __init xen_dt_guest_init(void)
{
        xen_events_irq = irq_of_parse_and_map(xen_node, 0);
}

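/*
 * Main guest initialisation, run as an early_initcall: pick up the
 * event-channel interrupt (from ACPI or DT), map the shared info page,
 * register per-vcpu info, set up grant tables and xenbus, and hook up
 * the event-channel upcall and steal-time accounting.
 */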
static int __init xen_guest_init(void)
{
        struct xen_add_to_physmap xatp;
        struct shared_info *shared_info_page = NULL;

        if (!xen_domain())
                return 0;

        if (!acpi_disabled)
                xen_acpi_guest_init();
        else
                xen_dt_guest_init();

        if (!xen_events_irq) {
                pr_err("Xen event channel interrupt not found\n");
                return -ENODEV;
        }

        shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);

        if (!shared_info_page) {
                pr_err("not enough memory\n");
                return -ENOMEM;
        }
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = virt_to_gfn(shared_info_page);
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();

        HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

        /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
         * page, we use it in the event channel upcall and in some pvclock
         * related functions.
         * The shared info contains exactly 1 CPU (the boot CPU). The guest
         * is required to use VCPUOP_register_vcpu_info to place vcpu info
         * for secondary CPUs as they are brought up.
         * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
         */
        xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
                                       sizeof(struct vcpu_info));
        if (xen_vcpu_info == NULL)
                return -ENOMEM;

        xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
        if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
                                          &xen_auto_xlat_grant_frames.vaddr,
                                          xen_auto_xlat_grant_frames.count)) {
                free_percpu(xen_vcpu_info);
                return -ENOMEM;
        }
        gnttab_init();
        if (!xen_initial_domain())
                xenbus_probe(NULL);

        /*
         * Making sure board specific code will not set up ops for
         * cpu idle and cpu freq.
         */
        disable_cpuidle();
        disable_cpufreq();

        xen_init_IRQ();

        if (request_percpu_irq(xen_events_irq, xen_arm_callback,
                               "events", &xen_vcpu)) {
                pr_err("Error request IRQ %d\n", xen_events_irq);
                return -EINVAL;
        }

        xen_percpu_init();

        register_cpu_notifier(&xen_cpu_notifier);

        pv_time_ops.steal_clock = xen_stolen_accounting;
        static_key_slow_inc(&paravirt_steal_enabled);
        if (xen_initial_domain())
                pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);

        return 0;
}
early_initcall(xen_guest_init);

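/*
 * Late init: install the Xen power-off and restart hooks and, for
 * non-initial domains, seed the system wall-clock time from Xen.
 */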
static int __init xen_pm_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        pm_power_off = xen_power_off;
        arm_pm_restart = xen_restart;
        if (!xen_initial_domain()) {
                struct timespec64 ts;
                xen_read_wallclock(&ts);
                do_settimeofday64(&ts);
        }

        return 0;
}
late_initcall(xen_pm_init);

/* empty stubs */
void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }
void xen_arch_suspend(void) { }


/* In the hypercall.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
EXPORT_SYMBOL_GPL(privcmd_call);