[MIPS] Avoid double signal restarting.
[deliverable/linux.git] / arch / mips / mm / cache.c
CommitLineData
1da177e4
LT
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 2003 by Ralf Baechle
7 */
1da177e4
LT
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13
14#include <asm/cacheflush.h>
15#include <asm/processor.h>
16#include <asm/cpu.h>
17#include <asm/cpu-features.h>
18
/*
 * Cache operations.
 *
 * These function pointers form the per-CPU cache-maintenance interface:
 * the cache-variant init routine selected in cpu_cache_init() installs
 * the implementations appropriate for the running CPU.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */
51/*
52 * We could optimize the case where the cache argument is not BCACHE but
53 * that seems very atypical use ...
54 */
d4264f18 55asmlinkage int sys_cacheflush(unsigned long addr,
fe00f943 56 unsigned long bytes, unsigned int cache)
1da177e4 57{
750ccf68
AN
58 if (bytes == 0)
59 return 0;
fe00f943 60 if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
1da177e4
LT
61 return -EFAULT;
62
63 flush_icache_range(addr, addr + bytes);
64
65 return 0;
66}
67
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long kaddr;

	/*
	 * If the page belongs to a mapping that nobody currently has
	 * mapped, defer the flush: mark the page dirty and let a later
	 * fault do the work.
	 */
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	kaddr = (unsigned long) page_address(page);
	flush_data_cache_page(kaddr);
}

EXPORT_SYMBOL(__flush_dcache_page);
88
89void __update_cache(struct vm_area_struct *vma, unsigned long address,
90 pte_t pte)
91{
92 struct page *page;
93 unsigned long pfn, addr;
94
95 pfn = pte_pfn(pte);
96 if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
97 Page_dcache_dirty(page)) {
98 if (pages_do_alias((unsigned long)page_address(page),
99 address & PAGE_MASK)) {
100 addr = (unsigned long) page_address(page);
101 flush_data_cache_page(addr);
102 }
103
104 ClearPageDcacheDirty(page);
105 }
106}
107
02cf2119
RB
108#define __weak __attribute__((weak))
109
110static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
1da177e4
LT
111
112void __init cpu_cache_init(void)
113{
02cf2119
RB
114 if (cpu_has_3k_cache) {
115 extern void __weak r3k_cache_init(void);
116
117 r3k_cache_init();
118 return;
119 }
120 if (cpu_has_6k_cache) {
121 extern void __weak r6k_cache_init(void);
122
123 r6k_cache_init();
124 return;
1da177e4 125 }
02cf2119
RB
126 if (cpu_has_4k_cache) {
127 extern void __weak r4k_cache_init(void);
128
129 r4k_cache_init();
130 return;
131 }
132 if (cpu_has_8k_cache) {
133 extern void __weak r8k_cache_init(void);
134
135 r8k_cache_init();
136 return;
137 }
138 if (cpu_has_tx39_cache) {
139 extern void __weak tx39_cache_init(void);
140
141 tx39_cache_init();
142 return;
143 }
144 if (cpu_has_sb1_cache) {
145 extern void __weak sb1_cache_init(void);
146
147 sb1_cache_init();
148 return;
149 }
150
151 panic(cache_panic);
1da177e4 152}
This page took 0.212713 seconds and 5 git commands to generate.