include/asm-ppc/page.h

#ifndef _PPC_PAGE_H
#define _PPC_PAGE_H

#include <asm/asm-compat.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)

/*
 * Subtle: this is an int (not an unsigned long) and so it
 * gets extended to 64 bits the way we want (i.e. with 1s). -- paulus
 */
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
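
/*
 * Worked example (illustrative only): with PAGE_SHIFT == 12, PAGE_MASK is
 * the int 0xfffff000, i.e. -4096.  Because it is an int, anding it with a
 * 64-bit quantity such as a CONFIG_PTE_64BIT pte value sign-extends it to
 * 0xfffffffffffff000, so the upper physical address bits survive:
 *
 *	(0x0000000123456fffULL & PAGE_MASK) == 0x0000000123456000ULL
 */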

#ifdef __KERNEL__

/* This must match what is in arch/ppc/Makefile */
#define PAGE_OFFSET CONFIG_KERNEL_START
#define KERNELBASE PAGE_OFFSET
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)

#ifndef __ASSEMBLY__

/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing.  For now this is just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
#define PTE_FMT "%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
#define PTE_FMT "%.8lx"
#endif
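
/*
 * Worked numbers (illustrative only): with PAGE_SHIFT == 12, a 4096-byte
 * page holds 4096 / 8 == 512 eight-byte ptes when CONFIG_PTE_64BIT is set,
 * hence PTE_SHIFT == PAGE_SHIFT - 3; otherwise it holds 4096 / 4 == 1024
 * four-byte ptes, hence PTE_SHIFT == PAGE_SHIFT - 2.  PTE_FMT is the
 * matching printk format width for each case.
 */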

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
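
/*
 * Worked example (illustrative only): with PAGE_SIZE == 0x1000,
 *
 *	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 *	PAGE_ALIGN(0x2000)          == 0x2000	(already aligned)
 *
 * The mask arithmetic assumes "size" is a power of two.
 */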
#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { pte_basic_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef pte_basic_t pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)

#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)

#endif

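/*
 * Usage sketch (hypothetical snippet; phys_addr and flags are just
 * illustrative names): with STRICT_MM_TYPECHECKS defined, pte_t is a
 * distinct struct, so a pte has to be built and taken apart with the
 * accessors:
 *
 *	pte_t pte = __pte(phys_addr | flags);
 *	pte_basic_t raw = pte_val(pte);
 *
 * Assigning a bare integer to a pte_t is then a compile error, which is
 * the type checking the option buys; without the option the wrappers
 * collapse to no-ops and the same code still compiles.
 */
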
struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *pg);

#ifndef CONFIG_APUS
#define PPC_MEMSTART 0
#define PPC_PGSTART 0
#define PPC_MEMOFFSET PAGE_OFFSET
#else
extern unsigned long ppc_memstart;
extern unsigned long ppc_pgstart;
extern unsigned long ppc_memoffset;
#define PPC_MEMSTART ppc_memstart
#define PPC_PGSTART ppc_pgstart
#define PPC_MEMOFFSET ppc_memoffset
#endif

#if defined(CONFIG_APUS) && !defined(MODULE)
/* map phys->virtual and virtual->phys for RAM pages */
static inline unsigned long ___pa(unsigned long v)
{
	unsigned long p;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".vtop_fixup\",\"aw\";"
		      ".align 1;"
		      ".long 1b;"
		      ".previous;"
		      : "=r" (p)
		      : "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));

	return p;
}
static inline void* ___va(unsigned long p)
{
	unsigned long v;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".ptov_fixup\",\"aw\";"
		      ".align 1;"
		      ".long 1b;"
		      ".previous;"
		      : "=r" (v)
		      : "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));

	return (void*) v;
}
#else
#define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
#define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
#endif
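
/*
 * Illustrative sketch, assuming the usual ppc32 PAGE_OFFSET of 0xc0000000
 * (a configuration choice, not something this header guarantees): in the
 * non-APUS case ___pa()/___va() are a plain offset, so
 *
 *	___pa(0xc0001000) == 0x00001000
 *	___va(0x00001000) == (void *)0xc0001000
 *
 * The APUS variants do the translation with an addis instruction and
 * record its address in the .vtop_fixup/.ptov_fixup sections, so the
 * immediate can be patched at boot once the real memory layout is known.
 */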

extern int page_is_ram(unsigned long pfn);

#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)(___va((unsigned long)(x))))

#define ARCH_PFN_OFFSET (PPC_PGSTART)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)

#define pfn_valid(pfn) (((pfn) - PPC_PGSTART) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

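/*
 * Sketch, again assuming PPC_PGSTART == 0 and PAGE_OFFSET == 0xc0000000:
 * __pa(0xc0003000) == 0x3000, so virt_to_page() resolves to pfn 3 and
 * pfn_valid(3) holds as long as max_mapnr > 3.
 */
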
/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
	int lz;

	size = (size-1) >> PAGE_SHIFT;
	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
	return 32 - lz;
}

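/*
 * Worked example (illustrative only): for size == 3 * PAGE_SIZE,
 * (size - 1) >> PAGE_SHIFT == 2, cntlzw counts 30 leading zeros in the
 * 32-bit value 2, and get_order() returns 32 - 30 == 2, i.e. an order-2
 * (four page) block, the smallest power-of-two allocation that fits.
 * For size == PAGE_SIZE the shifted value is 0, cntlzw returns 32, and
 * the result is order 0.
 */
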
#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1

#include <asm-generic/memory_model.h>
#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */