Merge remote-tracking branch 'lightnvm/for-next'
[deliverable/linux.git] / arch / arm64 / mm / cache.S
1 /*
2 * Cache maintenance
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/linkage.h>
22 #include <linux/init.h>
23 #include <asm/assembler.h>
24 #include <asm/cpufeature.h>
25 #include <asm/alternative.h>
26
27 /*
28 * flush_icache_range(start,end)
29 *
30 * Ensure that the I and D caches are coherent within specified region.
31 * This is typically used when code has been written to a memory region,
32 * and will be executed.
33 *
34 * - start - virtual start address of region
35 * - end - virtual end address of region
36 */
37 ENTRY(flush_icache_range)
38 	/* FALLTHROUGH */
39 
40 /*
41 * __flush_cache_user_range(start,end)
42 *
43 * Ensure that the I and D caches are coherent within specified region.
44 * This is typically used when code has been written to a memory region,
45 * and will be executed.
46 *
47 * - start - virtual start address of region
48 * - end - virtual end address of region
 *
 * Returns 0 on success, or -EFAULT if a user-space access faults
 * (faulting cache-op addresses are routed to label 9 via USER/user_alt).
49 */
50 ENTRY(__flush_cache_user_range)
51 	dcache_line_size x2, x3			// x2 = D-cache line size
52 	sub x3, x2, #1				// x3 = line-size mask
53 	bic x4, x0, x3				// x4 = start rounded down to line
54 1:
 /* clean D line to PoU; errata workaround uses clean+invalidate instead */
55 user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
56 	add x4, x4, x2				// next D-cache line
57 	cmp x4, x1
58 	b.lo 1b					// loop until end of region
59 	dsb ish					// cleans complete before I-side ops
60 
61 	icache_line_size x2, x3			// x2 = I-cache line size
62 	sub x3, x2, #1				// x3 = line-size mask
63 	bic x4, x0, x3				// restart from aligned start
64 1:
65 USER(9f, ic	ivau, x4	)	// invalidate I line PoU
66 	add x4, x4, x2				// next I-cache line
67 	cmp x4, x1
68 	b.lo 1b
69 	dsb ish					// invalidates complete
70 	isb					// resynchronise instruction fetch
71 	mov x0, #0				// success
72 	ret
73 9:						// user-access fault fixup target
74 	mov x0, #-EFAULT
75 	ret
76 ENDPROC(flush_icache_range)
77 ENDPROC(__flush_cache_user_range)
78
79 /*
80 * __flush_dcache_area(kaddr, size)
81 *
82 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
83 * are cleaned and invalidated to the PoC.
84 *
85 * - kaddr - kernel address
86 * - size - size in question
87 */
88 ENTRY(__flush_dcache_area)
 /* per-line "dc civac" (clean+invalidate to PoC) over [x0, x0+x1), then "dsb sy" */
89 	dcache_by_line_op civac, sy, x0, x1, x2, x3
90 	ret
91 ENDPIPROC(__flush_dcache_area)
92
93 /*
94 * __clean_dcache_area_pou(kaddr, size)
95 *
96 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
97 * are cleaned to the PoU.
98 *
99 * - kaddr - kernel address
100 * - size - size in question
101 */
102 ENTRY(__clean_dcache_area_pou)
 /* per-line "dc cvau" (clean to PoU) over [x0, x0+x1), then "dsb ish" */
103 	dcache_by_line_op cvau, ish, x0, x1, x2, x3
104 	ret
105 ENDPROC(__clean_dcache_area_pou)
106
107 /*
108 * __dma_inv_area(start, size)
109 * - start - virtual start address of region
110 * - size - size in question
111 */
112 __dma_inv_area:
113 	add x1, x1, x0				// x1 = end = start + size
114 	/* FALLTHROUGH */
115 
116 /*
117 * __inval_cache_range(start, end)
118 * - start - start address of region
119 * - end - end address of region
 *
 * Partial lines at either edge are cleaned AND invalidated (dc civac)
 * rather than just invalidated, so that unrelated dirty data sharing
 * those lines is written back instead of being discarded. Fully-covered
 * interior lines are invalidated only (dc ivac).
120 */
121 ENTRY(__inval_cache_range)
122 	dcache_line_size x2, x3			// x2 = D-cache line size
123 	sub x3, x2, #1				// x3 = line-size mask
124 	tst x1, x3				// end cache line aligned?
125 	bic x1, x1, x3				// align end down to line
126 	b.eq 1f
127 	dc civac, x1				// clean & invalidate D / U line
128 1:	tst x0, x3				// start cache line aligned?
129 	bic x0, x0, x3				// align start down to line
130 	b.eq 2f
131 	dc civac, x0				// clean & invalidate D / U line
132 	b 3f
133 2:	dc ivac, x0				// invalidate D / U line
134 3:	add x0, x0, x2				// next line
135 	cmp x0, x1
136 	b.lo 2b					// loop over remaining full lines
137 	dsb sy					// complete maintenance system-wide
138 	ret
139 ENDPIPROC(__inval_cache_range)
140 ENDPROC(__dma_inv_area)
141
142 /*
143 * __clean_dcache_area_poc(kaddr, size)
144 *
145 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
146 * are cleaned to the PoC.
147 *
148 * - kaddr - kernel address
149 * - size - size in question
150 */
151 ENTRY(__clean_dcache_area_poc)
152 	/* FALLTHROUGH */
153 
154 /*
155 * __dma_clean_area(start, size)
156 * - start - virtual start address of region
157 * - size - size in question
158 */
159 __dma_clean_area:
 /* per-line "dc cvac" (clean to PoC) over [x0, x0+x1), then "dsb sy" */
160 	dcache_by_line_op cvac, sy, x0, x1, x2, x3
161 	ret
162 ENDPIPROC(__clean_dcache_area_poc)
163 ENDPROC(__dma_clean_area)
164
165 /*
166 * __dma_flush_area(start, size)
167 *
168 * clean & invalidate D / U line
169 *
170 * - start - virtual start address of region
171 * - size - size in question
172 */
173 ENTRY(__dma_flush_area)
 /* per-line "dc civac" (clean+invalidate to PoC) over [x0, x0+x1), then "dsb sy" */
174 	dcache_by_line_op civac, sy, x0, x1, x2, x3
175 	ret
176 ENDPIPROC(__dma_flush_area)
177
178 /*
179 * __dma_map_area(start, size, dir)
180 * - start - kernel virtual start address
181 * - size - size of region
182 * - dir - DMA direction
183 */
184 ENTRY(__dma_map_area)
185 	cmp w2, #DMA_FROM_DEVICE
186 	b.eq __dma_inv_area			// device will write: invalidate
187 	b __dma_clean_area			// otherwise: clean to PoC (tail call)
188 ENDPIPROC(__dma_map_area)
189
190 /*
191 * __dma_unmap_area(start, size, dir)
192 * - start - kernel virtual start address
193 * - size - size of region
194 * - dir - DMA direction
195 */
196 ENTRY(__dma_unmap_area)
197 	cmp w2, #DMA_TO_DEVICE
198 	b.ne __dma_inv_area			// device may have written: invalidate (tail call)
199 	ret					// DMA_TO_DEVICE: nothing to do
200 ENDPIPROC(__dma_unmap_area)
This page took 0.035774 seconds and 5 git commands to generate.