| 1 | /* |
 * Machine-specific setup for Xen
| 3 | * |
| 4 | * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 |
| 5 | */ |
| 6 | |
| 7 | #include <linux/module.h> |
| 8 | #include <linux/sched.h> |
| 9 | #include <linux/mm.h> |
| 10 | #include <linux/pm.h> |
| 11 | #include <linux/memblock.h> |
| 12 | #include <linux/cpuidle.h> |
| 13 | #include <linux/cpufreq.h> |
| 14 | |
| 15 | #include <asm/elf.h> |
| 16 | #include <asm/vdso.h> |
| 17 | #include <asm/e820.h> |
| 18 | #include <asm/setup.h> |
| 19 | #include <asm/acpi.h> |
| 20 | #include <asm/xen/hypervisor.h> |
| 21 | #include <asm/xen/hypercall.h> |
| 22 | |
| 23 | #include <xen/xen.h> |
| 24 | #include <xen/page.h> |
| 25 | #include <xen/interface/callback.h> |
| 26 | #include <xen/interface/memory.h> |
| 27 | #include <xen/interface/physdev.h> |
| 28 | #include <xen/features.h> |
| 29 | #include "xen-ops.h" |
| 30 | #include "vdso.h" |
| 31 | |
| 32 | /* These are code, but not functions. Defined in entry.S */ |
| 33 | extern const char xen_hypervisor_callback[]; |
| 34 | extern const char xen_failsafe_callback[]; |
| 35 | extern void xen_sysenter_target(void); |
| 36 | extern void xen_syscall_target(void); |
| 37 | extern void xen_syscall32_target(void); |
| 38 | |
| 39 | /* Amount of extra memory space we add to the e820 ranges */ |
| 40 | struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; |
| 41 | |
| 42 | /* Number of pages released from the initial allocation. */ |
| 43 | unsigned long xen_released_pages; |
| 44 | |
| 45 | /* |
| 46 | * The maximum amount of extra memory compared to the base size. The |
| 47 | * main scaling factor is the size of struct page. At extreme ratios |
| 48 | * of base:extra, all the base memory can be filled with page |
| 49 | * structures for the extra memory, leaving no space for anything |
| 50 | * else. |
| 51 | * |
| 52 | * 10x seems like a reasonable balance between scaling flexibility and |
| 53 | * leaving a practically usable system. |
| 54 | */ |
| 55 | #define EXTRA_MEM_RATIO (10) |
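
/*
 * For example, assuming sizeof(struct page) is roughly 64 bytes on a
 * 4 KiB-page system, a 10x ratio means the page structures for the
 * extra memory consume at most 10 * 64 / 4096, i.e. about 16%, of the
 * base allocation.
 */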
| 56 | |
| 57 | static void __init xen_add_extra_mem(u64 start, u64 size) |
| 58 | { |
| 59 | unsigned long pfn; |
| 60 | int i; |
| 61 | |
| 62 | for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { |
| 63 | /* Add new region. */ |
| 64 | if (xen_extra_mem[i].size == 0) { |
| 65 | xen_extra_mem[i].start = start; |
| 66 | xen_extra_mem[i].size = size; |
| 67 | break; |
| 68 | } |
| 69 | /* Append to existing region. */ |
| 70 | if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) { |
| 71 | xen_extra_mem[i].size += size; |
| 72 | break; |
| 73 | } |
| 74 | } |
| 75 | if (i == XEN_EXTRA_MEM_MAX_REGIONS) |
		printk(KERN_WARNING "Not enough extra memory regions\n");
| 77 | |
| 78 | memblock_reserve(start, size); |
| 79 | |
| 80 | xen_max_p2m_pfn = PFN_DOWN(start + size); |
| 81 | for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { |
| 82 | unsigned long mfn = pfn_to_mfn(pfn); |
| 83 | |
		if (WARN(mfn == pfn, "Trying to overwrite 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove pfn %lx which has mfn %lx!\n",
		     pfn, mfn);
| 88 | |
| 89 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
| 90 | } |
| 91 | } |
| 92 | |
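/*
 * Walk [start, end) one PFN at a time, either releasing populated
 * frames back to Xen (XENMEM_decrease_reservation) or populating
 * holes (XENMEM_populate_physmap), keeping the P2M in sync. Returns
 * the number of frames actually released or populated.
 */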
| 93 | static unsigned long __init xen_do_chunk(unsigned long start, |
| 94 | unsigned long end, bool release) |
| 95 | { |
| 96 | struct xen_memory_reservation reservation = { |
| 97 | .address_bits = 0, |
| 98 | .extent_order = 0, |
| 99 | .domid = DOMID_SELF |
| 100 | }; |
| 101 | unsigned long len = 0; |
| 102 | unsigned long pfn; |
| 103 | int ret; |
| 104 | |
| 105 | for (pfn = start; pfn < end; pfn++) { |
| 106 | unsigned long frame; |
| 107 | unsigned long mfn = pfn_to_mfn(pfn); |
| 108 | |
| 109 | if (release) { |
| 110 | /* Make sure pfn exists to start with */ |
| 111 | if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) |
| 112 | continue; |
| 113 | frame = mfn; |
| 114 | } else { |
| 115 | if (mfn != INVALID_P2M_ENTRY) |
| 116 | continue; |
| 117 | frame = pfn; |
| 118 | } |
| 119 | set_xen_guest_handle(reservation.extent_start, &frame); |
| 120 | reservation.nr_extents = 1; |
| 121 | |
| 122 | ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap, |
| 123 | &reservation); |
| 124 | WARN(ret != 1, "Failed to %s pfn %lx err=%d\n", |
| 125 | release ? "release" : "populate", pfn, ret); |
| 126 | |
| 127 | if (ret == 1) { |
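			/*
			 * Record the new P2M entry; if that fails while
			 * populating, hand the freshly allocated frame
			 * straight back to Xen so the reservation stays
			 * consistent.
			 */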
| 128 | if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) { |
| 129 | if (release) |
| 130 | break; |
| 131 | set_xen_guest_handle(reservation.extent_start, &frame); |
| 132 | reservation.nr_extents = 1; |
| 133 | ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, |
| 134 | &reservation); |
| 135 | break; |
| 136 | } |
| 137 | len++; |
| 138 | } else |
| 139 | break; |
| 140 | } |
| 141 | if (len) |
| 142 | printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n", |
| 143 | release ? "Freeing" : "Populating", |
| 144 | start, end, len, |
| 145 | release ? "freed" : "added"); |
| 146 | |
| 147 | return len; |
| 148 | } |
| 149 | |
| 150 | static unsigned long __init xen_release_chunk(unsigned long start, |
| 151 | unsigned long end) |
| 152 | { |
| 153 | return xen_do_chunk(start, end, true); |
| 154 | } |
| 155 | |
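/*
 * Use the credit of previously released frames to populate RAM regions
 * that lie above the initial nr_pages allocation, recording the last
 * PFN populated in *last_pfn and returning the number of pages added.
 */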
| 156 | static unsigned long __init xen_populate_chunk( |
| 157 | const struct e820entry *list, size_t map_size, |
| 158 | unsigned long max_pfn, unsigned long *last_pfn, |
| 159 | unsigned long credits_left) |
| 160 | { |
| 161 | const struct e820entry *entry; |
| 162 | unsigned int i; |
| 163 | unsigned long done = 0; |
| 164 | unsigned long dest_pfn; |
| 165 | |
| 166 | for (i = 0, entry = list; i < map_size; i++, entry++) { |
| 167 | unsigned long s_pfn; |
| 168 | unsigned long e_pfn; |
| 169 | unsigned long pfns; |
| 170 | long capacity; |
| 171 | |
		if (credits_left == 0)
| 173 | break; |
| 174 | |
| 175 | if (entry->type != E820_RAM) |
| 176 | continue; |
| 177 | |
| 178 | e_pfn = PFN_DOWN(entry->addr + entry->size); |
| 179 | |
| 180 | /* We only care about E820 after the xen_start_info->nr_pages */ |
| 181 | if (e_pfn <= max_pfn) |
| 182 | continue; |
| 183 | |
| 184 | s_pfn = PFN_UP(entry->addr); |
		/*
		 * If the E820 entry falls within nr_pages, start at the
		 * nr_pages PFN. If that would run past the end of the
		 * entry, skip it.
		 */
| 189 | if (s_pfn <= max_pfn) { |
| 190 | capacity = e_pfn - max_pfn; |
| 191 | dest_pfn = max_pfn; |
| 192 | } else { |
| 193 | capacity = e_pfn - s_pfn; |
| 194 | dest_pfn = s_pfn; |
| 195 | } |
| 196 | |
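		/* Do not populate past the remaining credit of released pages. */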
| 197 | if (credits_left < capacity) |
| 198 | capacity = credits_left; |
| 199 | |
| 200 | pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false); |
| 201 | done += pfns; |
| 202 | *last_pfn = (dest_pfn + pfns); |
| 203 | if (pfns < capacity) |
| 204 | break; |
| 205 | credits_left -= pfns; |
| 206 | } |
| 207 | return done; |
| 208 | } |
| 209 | |
| 210 | static void __init xen_set_identity_and_release_chunk( |
| 211 | unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, |
| 212 | unsigned long *released, unsigned long *identity) |
| 213 | { |
| 214 | unsigned long pfn; |
| 215 | |
| 216 | /* |
| 217 | * If the PFNs are currently mapped, the VA mapping also needs |
| 218 | * to be updated to be 1:1. |
| 219 | */ |
| 220 | for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) |
| 221 | (void)HYPERVISOR_update_va_mapping( |
| 222 | (unsigned long)__va(pfn << PAGE_SHIFT), |
| 223 | mfn_pte(pfn, PAGE_KERNEL_IO), 0); |
| 224 | |
| 225 | if (start_pfn < nr_pages) |
| 226 | *released += xen_release_chunk( |
| 227 | start_pfn, min(end_pfn, nr_pages)); |
| 228 | |
| 229 | *identity += set_phys_range_identity(start_pfn, end_pfn); |
| 230 | } |
| 231 | |
| 232 | static unsigned long __init xen_set_identity_and_release( |
| 233 | const struct e820entry *list, size_t map_size, unsigned long nr_pages) |
| 234 | { |
| 235 | phys_addr_t start = 0; |
| 236 | unsigned long released = 0; |
| 237 | unsigned long identity = 0; |
| 238 | const struct e820entry *entry; |
| 239 | int i; |
| 240 | |
| 241 | /* |
| 242 | * Combine non-RAM regions and gaps until a RAM region (or the |
| 243 | * end of the map) is reached, then set the 1:1 map and |
| 244 | * release the pages (if available) in those non-RAM regions. |
| 245 | * |
| 246 | * The combined non-RAM regions are rounded to a whole number |
| 247 | * of pages so any partial pages are accessible via the 1:1 |
| 248 | * mapping. This is needed for some BIOSes that put (for |
| 249 | * example) the DMI tables in a reserved region that begins on |
| 250 | * a non-page boundary. |
| 251 | */ |
| 252 | for (i = 0, entry = list; i < map_size; i++, entry++) { |
| 253 | phys_addr_t end = entry->addr + entry->size; |
| 254 | if (entry->type == E820_RAM || i == map_size - 1) { |
| 255 | unsigned long start_pfn = PFN_DOWN(start); |
| 256 | unsigned long end_pfn = PFN_UP(end); |
| 257 | |
| 258 | if (entry->type == E820_RAM) |
| 259 | end_pfn = PFN_UP(entry->addr); |
| 260 | |
| 261 | if (start_pfn < end_pfn) |
| 262 | xen_set_identity_and_release_chunk( |
| 263 | start_pfn, end_pfn, nr_pages, |
| 264 | &released, &identity); |
| 265 | |
| 266 | start = end; |
| 267 | } |
| 268 | } |
| 269 | |
| 270 | if (released) |
| 271 | printk(KERN_INFO "Released %lu pages of unused memory\n", released); |
| 272 | if (identity) |
| 273 | printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity); |
| 274 | |
| 275 | return released; |
| 276 | } |
| 277 | |
| 278 | static unsigned long __init xen_get_max_pages(void) |
| 279 | { |
| 280 | unsigned long max_pages = MAX_DOMAIN_PAGES; |
| 281 | domid_t domid = DOMID_SELF; |
| 282 | int ret; |
| 283 | |
| 284 | /* |
| 285 | * For the initial domain we use the maximum reservation as |
| 286 | * the maximum page. |
| 287 | * |
| 288 | * For guest domains the current maximum reservation reflects |
| 289 | * the current maximum rather than the static maximum. In this |
| 290 | * case the e820 map provided to us will cover the static |
| 291 | * maximum region. |
| 292 | */ |
| 293 | if (xen_initial_domain()) { |
| 294 | ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); |
| 295 | if (ret > 0) |
| 296 | max_pages = ret; |
| 297 | } |
| 298 | |
| 299 | return min(max_pages, MAX_DOMAIN_PAGES); |
| 300 | } |
| 301 | |
static void __init xen_align_and_add_e820_region(u64 start, u64 size, int type)
| 303 | { |
| 304 | u64 end = start + size; |
| 305 | |
| 306 | /* Align RAM regions to page boundaries. */ |
| 307 | if (type == E820_RAM) { |
| 308 | start = PAGE_ALIGN(start); |
| 309 | end &= ~((u64)PAGE_SIZE - 1); |
| 310 | } |
| 311 | |
| 312 | e820_add_region(start, end - start, type); |
| 313 | } |
| 314 | |
| 315 | /** |
 * xen_memory_setup - Hook for machine-specific memory setup.
| 317 | **/ |
| 318 | char * __init xen_memory_setup(void) |
| 319 | { |
| 320 | static struct e820entry map[E820MAX] __initdata; |
| 321 | |
| 322 | unsigned long max_pfn = xen_start_info->nr_pages; |
| 323 | unsigned long long mem_end; |
| 324 | int rc; |
| 325 | struct xen_memory_map memmap; |
| 326 | unsigned long max_pages; |
| 327 | unsigned long last_pfn = 0; |
| 328 | unsigned long extra_pages = 0; |
| 329 | unsigned long populated; |
| 330 | int i; |
| 331 | int op; |
| 332 | |
| 333 | max_pfn = min(MAX_DOMAIN_PAGES, max_pfn); |
| 334 | mem_end = PFN_PHYS(max_pfn); |
| 335 | |
| 336 | memmap.nr_entries = E820MAX; |
| 337 | set_xen_guest_handle(memmap.buffer, map); |
| 338 | |
| 339 | op = xen_initial_domain() ? |
| 340 | XENMEM_machine_memory_map : |
| 341 | XENMEM_memory_map; |
| 342 | rc = HYPERVISOR_memory_op(op, &memmap); |
| 343 | if (rc == -ENOSYS) { |
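		/*
		 * The hypervisor does not provide a memory map (very old
		 * Xen); synthesize a single RAM region covering the
		 * initial allocation.
		 */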
| 344 | BUG_ON(xen_initial_domain()); |
| 345 | memmap.nr_entries = 1; |
| 346 | map[0].addr = 0ULL; |
| 347 | map[0].size = mem_end; |
| 348 | /* 8MB slack (to balance backend allocations). */ |
| 349 | map[0].size += 8ULL << 20; |
| 350 | map[0].type = E820_RAM; |
| 351 | rc = 0; |
| 352 | } |
| 353 | BUG_ON(rc); |
| 354 | |
| 355 | /* Make sure the Xen-supplied memory map is well-ordered. */ |
| 356 | sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); |
| 357 | |
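	/*
	 * Any headroom between the initial allocation and the domain's
	 * maximum reservation can be ballooned in later, so count it as
	 * extra memory.
	 */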
| 358 | max_pages = xen_get_max_pages(); |
| 359 | if (max_pages > max_pfn) |
| 360 | extra_pages += max_pages - max_pfn; |
| 361 | |
| 362 | /* |
| 363 | * Set P2M for all non-RAM pages and E820 gaps to be identity |
	 * type PFNs. Any RAM pages that would be made inaccessible by
| 365 | * this are first released. |
| 366 | */ |
| 367 | xen_released_pages = xen_set_identity_and_release( |
| 368 | map, memmap.nr_entries, max_pfn); |
| 369 | |
| 370 | /* |
| 371 | * Populate back the non-RAM pages and E820 gaps that had been |
| 372 | * released. */ |
| 373 | populated = xen_populate_chunk(map, memmap.nr_entries, |
| 374 | max_pfn, &last_pfn, xen_released_pages); |
| 375 | |
| 376 | xen_released_pages -= populated; |
| 377 | extra_pages += xen_released_pages; |
| 378 | |
| 379 | if (last_pfn > max_pfn) { |
| 380 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); |
| 381 | mem_end = PFN_PHYS(max_pfn); |
| 382 | } |
| 383 | /* |
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * multiple of the base size. On non-highmem systems, the base
| 386 | * size is the full initial memory allocation; on highmem it |
| 387 | * is limited to the max size of lowmem, so that it doesn't |
| 388 | * get completely filled. |
| 389 | * |
| 390 | * In principle there could be a problem in lowmem systems if |
| 391 | * the initial memory is also very large with respect to |
| 392 | * lowmem, but we won't try to deal with that here. |
| 393 | */ |
| 394 | extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), |
| 395 | extra_pages); |
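	/*
	 * Rebuild the e820: RAM below mem_end is added as-is, RAM above
	 * it is fed into the extra-memory pool until extra_pages runs
	 * out, and whatever remains is marked unusable.
	 */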
| 396 | i = 0; |
| 397 | while (i < memmap.nr_entries) { |
| 398 | u64 addr = map[i].addr; |
| 399 | u64 size = map[i].size; |
| 400 | u32 type = map[i].type; |
| 401 | |
| 402 | if (type == E820_RAM) { |
| 403 | if (addr < mem_end) { |
| 404 | size = min(size, mem_end - addr); |
| 405 | } else if (extra_pages) { |
| 406 | size = min(size, (u64)extra_pages * PAGE_SIZE); |
| 407 | extra_pages -= size / PAGE_SIZE; |
| 408 | xen_add_extra_mem(addr, size); |
| 409 | } else |
| 410 | type = E820_UNUSABLE; |
| 411 | } |
| 412 | |
| 413 | xen_align_and_add_e820_region(addr, size, type); |
| 414 | |
| 415 | map[i].addr += size; |
| 416 | map[i].size -= size; |
| 417 | if (map[i].size == 0) |
| 418 | i++; |
| 419 | } |
| 420 | |
| 421 | /* |
| 422 | * In domU, the ISA region is normal, usable memory, but we |
| 423 | * reserve ISA memory anyway because too many things poke |
| 424 | * about in there. |
| 425 | */ |
| 426 | e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, |
| 427 | E820_RESERVED); |
| 428 | |
| 429 | /* |
| 430 | * Reserve Xen bits: |
| 431 | * - mfn_list |
| 432 | * - xen_start_info |
| 433 | * See comment above "struct start_info" in <xen/interface/xen.h> |
| 434 | */ |
| 435 | memblock_reserve(__pa(xen_start_info->mfn_list), |
| 436 | xen_start_info->pt_base - xen_start_info->mfn_list); |
| 437 | |
| 438 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
| 439 | |
| 440 | return "Xen"; |
| 441 | } |
| 442 | |
| 443 | /* |
| 444 | * Set the bit indicating "nosegneg" library variants should be used. |
| 445 | * We only need to bother in pure 32-bit mode; compat 32-bit processes |
| 446 | * can have un-truncated segments, so wrapping around is allowed. |
| 447 | */ |
| 448 | static void __init fiddle_vdso(void) |
| 449 | { |
| 450 | #ifdef CONFIG_X86_32 |
| 451 | u32 *mask; |
| 452 | mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK); |
| 453 | *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT; |
| 454 | mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK); |
| 455 | *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT; |
| 456 | #endif |
| 457 | } |
| 458 | |
| 459 | static int __cpuinit register_callback(unsigned type, const void *func) |
| 460 | { |
| 461 | struct callback_register callback = { |
| 462 | .type = type, |
| 463 | .address = XEN_CALLBACK(__KERNEL_CS, func), |
| 464 | .flags = CALLBACKF_mask_events, |
| 465 | }; |
| 466 | |
| 467 | return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); |
| 468 | } |
| 469 | |
| 470 | void __cpuinit xen_enable_sysenter(void) |
| 471 | { |
| 472 | int ret; |
| 473 | unsigned sysenter_feature; |
| 474 | |
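	/*
	 * 32-bit kernels gate sysenter on the native SEP flag; 64-bit
	 * kernels use the SYSENTER32 flag, which covers sysenter from
	 * 32-bit compat userspace.
	 */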
| 475 | #ifdef CONFIG_X86_32 |
| 476 | sysenter_feature = X86_FEATURE_SEP; |
| 477 | #else |
| 478 | sysenter_feature = X86_FEATURE_SYSENTER32; |
| 479 | #endif |
| 480 | |
| 481 | if (!boot_cpu_has(sysenter_feature)) |
| 482 | return; |
| 483 | |
| 484 | ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target); |
	if (ret != 0)
| 486 | setup_clear_cpu_cap(sysenter_feature); |
| 487 | } |
| 488 | |
| 489 | void __cpuinit xen_enable_syscall(void) |
| 490 | { |
| 491 | #ifdef CONFIG_X86_64 |
| 492 | int ret; |
| 493 | |
| 494 | ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target); |
| 495 | if (ret != 0) { |
| 496 | printk(KERN_ERR "Failed to set syscall callback: %d\n", ret); |
		/*
		 * Pretty fatal; 64-bit userspace has no other
		 * mechanism for syscalls.
		 */
| 499 | } |
| 500 | |
| 501 | if (boot_cpu_has(X86_FEATURE_SYSCALL32)) { |
| 502 | ret = register_callback(CALLBACKTYPE_syscall32, |
| 503 | xen_syscall32_target); |
| 504 | if (ret != 0) |
| 505 | setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); |
| 506 | } |
| 507 | #endif /* CONFIG_X86_64 */ |
| 508 | } |
| 509 | |
| 510 | void __init xen_arch_setup(void) |
| 511 | { |
| 512 | xen_panic_handler_init(); |
| 513 | |
| 514 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); |
| 515 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables); |
| 516 | |
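	/*
	 * Non-auto-translated PAE guests need the extended-cr3 assist so
	 * the hypervisor accepts page directories above 4 GiB.
	 */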
| 517 | if (!xen_feature(XENFEAT_auto_translated_physmap)) |
| 518 | HYPERVISOR_vm_assist(VMASST_CMD_enable, |
| 519 | VMASST_TYPE_pae_extended_cr3); |
| 520 | |
| 521 | if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) || |
| 522 | register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback)) |
| 523 | BUG(); |
| 524 | |
| 525 | xen_enable_sysenter(); |
| 526 | xen_enable_syscall(); |
| 527 | |
| 528 | #ifdef CONFIG_ACPI |
| 529 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { |
| 530 | printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); |
| 531 | disable_acpi(); |
| 532 | } |
| 533 | #endif |
| 534 | |
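	/*
	 * Copy the guest's command line, truncated to the smaller of
	 * MAX_GUEST_CMDLINE and COMMAND_LINE_SIZE.
	 */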
| 535 | memcpy(boot_command_line, xen_start_info->cmd_line, |
| 536 | MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ? |
| 537 | COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE); |
| 538 | |
	/* Set up idle, making sure it calls the safe_halt() pvop */
| 540 | #ifdef CONFIG_X86_32 |
| 541 | boot_cpu_data.hlt_works_ok = 1; |
| 542 | #endif |
| 543 | disable_cpuidle(); |
| 544 | disable_cpufreq(); |
| 545 | WARN_ON(set_pm_idle_to_default()); |
| 546 | fiddle_vdso(); |
| 547 | } |