1/*
2 * arch/sh/mm/cache-sh2a.c
3 *
4 * Copyright (C) 2008 Yoshinori Sato
5 *
6 * Released under the terms of the GNU GPL v2.0.
7 */
8
9#include <linux/init.h>
10#include <linux/mm.h>
11
12#include <asm/cache.h>
13#include <asm/addrspace.h>
14#include <asm/processor.h>
15#include <asm/cacheflush.h>
16#include <asm/io.h>
17
c1537b48
PE
18/*
19 * The maximum number of pages we support up to when doing ranged dcache
20 * flushing. Anything exceeding this will simply flush the dcache in its
21 * entirety.
22 */
23#define MAX_OCACHE_PAGES 32
24#define MAX_ICACHE_PAGES 32
25
#ifdef CONFIG_CACHE_WRITEBACK
/*
 * Write back one operand-cache line: if the given way currently holds the
 * line for address @v, clear its SH_CACHE_UPDATED (dirty) bit in the OC
 * address array so the line is written back.  If the way holds a different
 * address, the entry is left untouched.
 */
static void sh2a_flush_oc_line(unsigned long v, int way)
{
	unsigned long entry = (v & 0x000007f0) | (way << 11);
	unsigned long tag;

	tag = __raw_readl(CACHE_OC_ADDRESS_ARRAY | entry);
	if ((tag & CACHE_PHYSADDR_MASK) != (v & CACHE_PHYSADDR_MASK))
		return;	/* this way caches some other address */

	__raw_writel(tag & ~SH_CACHE_UPDATED, CACHE_OC_ADDRESS_ARRAY | entry);
}
#endif
/*
 * Invalidate the cache line for address @v through the address array at
 * @cache_addr (OC or IC address array).  The associative bit makes the
 * write compare @v against the tags of all ways, so the matching way is
 * invalidated without knowing the way number up front.
 */
static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
{
	/* Set associative bit to hit all ways */
	unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
	__raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
}
/*
 * Write back the dirty D-caches, but not invalidate them.
 *
 * A no-op unless the cache is configured write-back
 * (CONFIG_CACHE_WRITEBACK); in write-through mode there is never dirty
 * data to write back.
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;
	int nr_ways;

	/* Round the region out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	nr_ways = current_cpu_data.dcache.ways;

	/* Poke the address arrays from uncached space with IRQs off. */
	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then flush the entire cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		/* Walk every entry of every way through the OC address
		 * array, clearing the UPDATED (dirty) bit where set. */
		begin = CACHE_OC_ADDRESS_ARRAY;
		end = begin + (nr_ways * current_cpu_data.dcache.way_size);

		for (v = begin; v < end; v += L1_CACHE_BYTES) {
			unsigned long data = __raw_readl(v);
			if (data & SH_CACHE_UPDATED)
				__raw_writel(data & ~SH_CACHE_UPDATED, v);
		}
	} else {
		/* Ranged flush: try each line of the region in each way. */
		int way;
		for (way = 0; way < nr_ways; way++) {
			for (v = begin; v < end; v += L1_CACHE_BYTES)
				sh2a_flush_oc_line(v, way);
		}
	}

	back_to_cached();
	local_irq_restore(flags);
#endif
}
/*
 * Write back the dirty D-caches and invalidate them.
 */
static void sh2a__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	/* Round the region out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	/* Poke the address arrays from uncached space with IRQs off. */
	local_irq_save(flags);
	jump_to_uncached();

	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
#ifdef CONFIG_CACHE_WRITEBACK
		/* Write-back mode: flush each way's copy of the line first
		 * so dirty data reaches memory before the invalidate. */
		int way;
		int nr_ways = current_cpu_data.dcache.ways;
		for (way = 0; way < nr_ways; way++)
			sh2a_flush_oc_line(v, way);
#endif
		/* Associative invalidate hits whichever way holds v. */
		sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}
/*
 * Invalidate the D-caches, but no write back please
 */
static void sh2a__flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	/* Round the region out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then just blow the cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		/* Whole-cache invalidate via the CCR control register. */
		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
			     SH_CCR);
	} else {
		/* Per-line associative invalidate of just this region. */
		for (v = begin; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * @args points to a struct flusher_data giving the [addr1, addr2) range
 * (void * signature so it can be installed as local_flush_icache_range).
 */
static void sh2a_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long v;
	unsigned long flags;

	/* Round the range out to whole cache lines. */
	start = data->addr1 & ~(L1_CACHE_BYTES-1);
	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

#ifdef CONFIG_CACHE_WRITEBACK
	/* Push any dirty D-cache data for the range to memory first, so
	 * the refetched instructions are up to date. */
	sh2a__flush_wback_region((void *)start, end-start);
#endif

	local_irq_save(flags);
	jump_to_uncached();

	/* I-Cache invalidate */
	/* If there are too many pages then just blow the cache */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		/* Whole I-cache invalidate via the CCR control register. */
		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
			     SH_CCR);
	} else {
		/* Per-line associative invalidate through the IC array. */
		for (v = start; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}
182void __init sh2a_cache_init(void)
183{
f26b2a56 184 local_flush_icache_range = sh2a_flush_icache_range;
a58e1a2a
PM
185
186 __flush_wback_region = sh2a__flush_wback_region;
187 __flush_purge_region = sh2a__flush_purge_region;
188 __flush_invalidate_region = sh2a__flush_invalidate_region;
189}