PM: Warn if interrupts are enabled during suspend-resume of sysdevs
[deliverable/linux.git] arch/x86/power/cpu_64.c
/*
 * Suspend and hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/xcr.h>
#include <asm/suspend.h>

static void fix_processor_context(void);

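/*
 * Register state of the boot CPU, filled in by __save_processor_state()
 * below and consumed by __restore_processor_state() on the way back.
 */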
struct saved_context saved_context;

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() must save all registers needed by
 * kernel A, so that it can operate correctly after the resume regardless
 * of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
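	/*
	 * kernel_fpu_begin() above saves the current FPU/SSE state into the
	 * task's state area (and disables preemption), so the vector
	 * registers travel with the memory image rather than with this
	 * structure.
	 */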
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
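	/* EFER carries the long-mode and NX enables (SCE, LME/LMA, NXE). */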
	rdmsrl(MSR_EFER, ctxt->efer);
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}

void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: structure to load the register contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
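	/*
	 * EFER is restored first so that the long-mode and NX configuration
	 * is back in place before the saved %cr4 and %cr3 values (and with
	 * them the image's page tables) are reinstalled below.
	 */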
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
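	/*
	 * %gs is loaded via load_gs_index() rather than a plain mov: on
	 * x86-64 a mov to %gs also reloads the hidden GS base, which would
	 * clobber the kernel's per-cpu base.  The saved bases are written
	 * back explicitly right below in any case.
	 */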
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	/*
	 * restore XCR0 for XSAVE-capable CPUs.
	 */
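	/*
	 * XCR0 selects which extended states (x87/SSE/AVX, ...) the XSAVE
	 * family of instructions manages; pcntxt_mask is the feature mask
	 * worked out at boot, so this puts XSAVE back exactly as it was.
	 */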
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}

void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

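/*
 * Rough calling context (a simplified sketch; the real sequence lives in
 * kernel/power/):
 *
 *	save_processor_state();
 *	error = swsusp_arch_suspend();
 *	restore_processor_state();
 *
 * Everything saved above must therefore survive whatever runs between the
 * two calls, including a different boot kernel in the hibernation case.
 */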
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory; should not be necessary. But... This
	 * is necessary, because 386 hardware has the concept of a busy TSS
	 * or some similar stupidity.
	 */
	set_tss_desc(cpu, t);

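	/*
	 * Type 9 is an available TSS; ltr marks the descriptor busy
	 * (type 11), so reset the type before load_TR_desc() reloads
	 * %tr below.
	 */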
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();	/* This sets MSR_*STAR and related */
	load_TR_desc();	/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5: DR4/DR5 are reserved aliases of DR6/DR7 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}