memblock: s/memblock_analyze()/memblock_allow_resize()/ and update users
arch/powerpc/mm/tlb_nohash.c
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603, however, which shares the implementation with
 * hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>

#include "mmu_decl.h"

/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_FSL_BOOKE
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_FSL_BOOKE */

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
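
/*
 * Usage sketch (not new behaviour, just how the table above is consumed):
 * a normal user page flush goes local_flush_tlb_page() ->
 * mmu_get_tsize(mmu_virtual_psize), which returns the hardware .enc value
 * (e.g. BOOK3E_PAGESZ_4K on a 4K-page kernel) that is then passed as the
 * "tsize" argument to _tlbil_va().
 */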

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
unsigned long linear_map_top;	/* Top of linear mapping */

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

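/*
 * An mm is "core local" when every CPU in its cpumask is an SMT sibling
 * of the calling CPU; in that case a local _tlbil_* is sufficient and no
 * IPI or tlbivax broadcast is needed.
 */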
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

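/* Arguments marshalled for the TLB flush IPI handlers below */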
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since some
 * implementations can stack multiple tlbivax instructions before a tlbsync,
 * but for now we keep it this way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

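/*
 * Called by the generic mmu_gather code whenever the gather needs to be
 * flushed; on nohash we simply flush the whole mm context.
 */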
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_enabled) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

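		/*
		 * Illustration (assuming 4K pages and 8-byte PTEs, i.e. the
		 * non-64K branch below): the PTE covering "address" sits at
		 * (address >> 12) * 8 = address >> (PAGE_SHIFT - 3) bytes
		 * into the virtual linear page table, and the low 12 bits
		 * are then cleared so the whole VPTE page holding that PTE
		 * is targeted; the region bits computed above are OR'd back
		 * in afterwards.
		 */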
#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);

	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
	    (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0)
				continue;

			/* adjust to be in terms of 4^shift Kb */
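			/* e.g. a 16MB page: shift 24 -> (24 - 10) >> 1 = 7,
			 * and 4^7 KB = 16MB, matching the power-of-4KB
			 * MIN/MAXSIZE encoding compared against below */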
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto no_indirect;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0)
		goto no_indirect;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
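	/*
	 * The loop below assumes EPTCFG packs up to three pairs of 5-bit
	 * fields, each pair giving a direct (sub) page size "sps" and the
	 * indirect page size "ps" it can be used under, both expressed as
	 * log2(size) - 10.
	 */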
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
 no_indirect:

	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void __patch_exception(int exc, unsigned long addr)
{
	extern unsigned int interrupt_base_book3e;
	unsigned int *ibase = &interrupt_base_book3e;

	/* Our exception vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one
	 */

	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}

#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
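
/*
 * For example, setup_mmu_htw() below uses
 * patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e) to redirect the
 * data TLB miss vector at offset 0x1c0 to the hardware-tablewalk handler.
 */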

static void setup_mmu_htw(void)
{
	/* Check if HW tablewalk is present, and if yes, enable it by:
	 *
	 * - patching the TLB miss handlers to branch to the
	 *   one dedicated to it
	 *
	 * - setting the global book3e_htw_enabled
	 */
	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);

	if ((tlb0cfg & TLBnCFG_IND) &&
	    (tlb0cfg & TLBnCFG_PT)) {
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		book3e_htw_enabled = 1;
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_enabled ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void __early_init_mmu(int boot_cpu)
{
	unsigned int mas4;

	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 * check what page size combos are supported by the HW. It
	 * also doesn't handle the case where a separate array holds
	 * the IND entries from the array loaded by the PT.
	 */
	if (boot_cpu) {
		/* Look for supported page sizes */
		setup_page_sizes();

		/* Look for HW tablewalk support */
		setup_mmu_htw();
	}

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	if (book3e_htw_enabled) {
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
	} else {
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
	}
	mtspr(SPRN_MAS4, mas4);

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);

		/* limit memory so we don't have linear faults */
		memblock_enforce_memory_limit(linear_map_top);

		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();

	memblock_set_current_limit(linear_map_top);
}

void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}

void __cpuinit early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, we adjust the RMA size to match the
	 * first bolted TLB entry size. We still limit max to 1G even if
	 * the TLB could cover more. This is due to what the early init
	 * code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
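	/* Note: the 0x40000000 constant below is the 1G cap mentioned above */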
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
					first_memblock_base);
		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */