arch/sh/include/asm/page.h

#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999 Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK

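/*
 * Worked example (illustrative): with CONFIG_PAGE_SIZE_4KB,
 * PAGE_SHIFT == 12, so PAGE_SIZE == 0x1000 and, on a 32-bit build,
 * PAGE_MASK == 0xfffff000. Masking an address with PAGE_MASK rounds
 * it down to its page base:
 *
 *	0x8c01234f & PAGE_MASK == 0x8c012000
 */
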
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif

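/*
 * Worked example (illustrative): with CONFIG_HUGETLB_PAGE_SIZE_64K and
 * 4KiB base pages, HPAGE_SHIFT == 16, so HPAGE_SIZE == 0x10000 (64KiB)
 * and HUGETLB_PAGE_ORDER == 16 - 12 == 4, i.e. one huge page spans
 * 2^4 == 16 base pages.
 */
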
#ifndef __ASSEMBLY__
#include <asm/uncached.h>

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end, memory_limit;

static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

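/*
 * Example (illustrative, assuming a 16KiB alias window, i.e.
 * shm_align_mask == 0x3fff): pages_do_alias(0x10001000, 0x10003000)
 * is non-zero because the two addresses differ in the colour bits
 * (0x2000), so the same physical page would occupy two distinct cache
 * locations; pages_do_alias(0x10001000, 0x10005000) is 0, since the
 * addresses share a colour and no aliasing can occur.
 */
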
#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)

struct page;
struct vm_area_struct;

extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage	clear_user_highpage

/*
 * These are used to make use of C type-checking..
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
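/*
 * Example (illustrative): on X2TLB the 64-bit PTE is split across two
 * 32-bit words, so __pte(0x123456789ULL) yields pte_low == 0x23456789
 * and pte_high == 0x1, and pte_val() on the result reassembles the
 * original 0x123456789ULL.
 */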
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#define pte_pgprot(x) __pgprot(pte_val(x) & PTE_FLAGS_MASK)

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and SIZE are the physical addresses and size of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PHYSICAL_OFFSET is the offset in physical memory where the base
 * of the kernel is loaded.
 */
#ifdef CONFIG_PHYSICAL_START
#define PHYSICAL_OFFSET		(CONFIG_PHYSICAL_START - __MEMORY_START)
#else
#define PHYSICAL_OFFSET		0
#endif

/*
 * PAGE_OFFSET is the virtual address of the start of kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29-bit mode, the physical offset of RAM from address 0 is visible
 * in the kernel virtual address space, and thus we don't have to take
 * it into account when translating. However, in 32-bit mode this offset
 * is not visible (it is part of the PMB mapping) and so needs to be
 * added or subtracted as required.
 */
#ifdef CONFIG_PMB
#define ___pa(x)	((x)-PAGE_OFFSET+__MEMORY_START)
#define ___va(x)	((x)+PAGE_OFFSET-__MEMORY_START)
#else
#define ___pa(x)	((x)-PAGE_OFFSET)
#define ___va(x)	((x)+PAGE_OFFSET)
#endif

#ifndef __ASSEMBLY__
#define __pa(x)		___pa((unsigned long)x)
#define __va(x)		(void *)___va((unsigned long)x)
#endif /* !__ASSEMBLY__ */

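/*
 * Worked example (illustrative, assuming PAGE_OFFSET == 0x80000000 and
 * __MEMORY_START == 0x08000000): with CONFIG_PMB,
 *
 *	__pa(0x80001000) == 0x80001000 - 0x80000000 + 0x08000000
 *	                 == 0x08001000
 *	__va(0x08001000) == (void *)0x80001000
 *
 * Without CONFIG_PMB the RAM offset is already visible through the
 * kernel segment, so __pa(0x88001000) == 0x08001000 by subtracting
 * PAGE_OFFSET alone.
 */
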
#ifdef CONFIG_UNCACHED_MAPPING
#if defined(CONFIG_29BIT)
#define UNCAC_ADDR(addr)	P2SEGADDR(addr)
#define CAC_ADDR(addr)		P1SEGADDR(addr)
#else
#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)
#endif
#else
#define UNCAC_ADDR(addr)	((addr))
#define CAC_ADDR(addr)		((addr))
#endif

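/*
 * Example (illustrative): in 29-bit mode the same physical address is
 * visible through both the cached P1 segment (from 0x80000000) and the
 * uncached P2 segment (from 0xa0000000), so UNCAC_ADDR(0x8c001000)
 * simply rebases the address into P2, giving 0xac001000, and
 * CAC_ADDR() maps it back.
 */
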
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0).
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM.
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
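/*
 * Worked example (illustrative): with __MEMORY_START == 0x08000000 and
 * 4KiB pages, PFN_START == 0x08000000 >> 12 == 0x8000, so the mem_map
 * array starts at the first page of RAM rather than at physical
 * address 0, saving roughly 32768 struct page entries.
 */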
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
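/*
 * Example (illustrative, assuming a 32-byte L1 cache line): with the
 * minalign above, kmalloc(13) still returns a 32-byte-aligned object
 * padded out to a full line, so a cache invalidate on a DMA'd buffer
 * cannot clobber an unrelated neighbouring allocation sharing the
 * same line.
 */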

#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this to only be special cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */