/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to a
 *	memory region and is about to be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to a
 *	memory region and is about to be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3			// align start down to line size
	/* clean D lines to the PoU, one cache line at a time */
1:
user_alt 9f, "dc cvau, x4",  "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish				// complete cleans before I-side ops

	icache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3			// align start down to line size
1:
USER(9f, ic	ivau, x4	)		// invalidate I line PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish
	isb					// sync the fetched instruction stream
	mov	x0, #0
	ret
9:
	mov	x0, #-EFAULT
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
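
/*
 * Illustrative C-side usage (a sketch, not part of this file): a caller
 * typically flushes the range after writing instructions to memory,
 * e.g. when loading code that is about to be executed:
 *
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + size);
 *
 * __flush_cache_user_range() is the variant for user addresses; it
 * returns -EFAULT via the 9: fixup label above if an access faults.
 */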

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel start address of region
 *	- size    - size of region in bytes
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)
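
/*
 * Illustrative usage (a sketch; the caller context is an assumption):
 * cleaning and invalidating one page to the PoC before handing it to a
 * non-cache-coherent observer:
 *
 *	__flush_dcache_area(page_address(page), PAGE_SIZE);
 */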

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel start address of region
 *	- size    - size of region in bytes
 */
ENTRY(__clean_dcache_area_pou)
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)
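
/*
 * The dcache_by_line_op macro used above hides the usual by-line loop.
 * A rough open-coded equivalent of "dcache_by_line_op cvau, ish, ..."
 * (a sketch, with x0 = kaddr and x1 = size as per the comment above):
 *
 *	dcache_line_size x2, x3		// x2 = D line size in bytes
 *	add	x1, x0, x1		// x1 = end of region
 *	sub	x3, x2, #1
 *	bic	x0, x0, x3		// align start down to line size
 * 1:	dc	cvau, x0		// clean D line to the PoU
 *	add	x0, x0, x2
 *	cmp	x0, x1
 *	b.lo	1b
 *	dsb	ish
 */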

/*
 *	__inval_cache_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__inval_cache_range)
	/* FALLTHROUGH */

/*
 *	__dma_inv_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
__dma_inv_range:
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPIPROC(__inval_cache_range)
ENDPROC(__dma_inv_range)
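
/*
 * Note on the unaligned edges above: a cache line that is only partly
 * covered by [start, end) may still hold live data belonging to a
 * neighbouring buffer, so it is cleaned and invalidated (dc civac)
 * rather than discarded outright (dc ivac), which would throw those
 * neighbouring bytes away.
 */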

/*
 *	__dma_clean_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
__dma_clean_range:
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x0, x0, x3			// align start down to line size
1:
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	cvac, x0			// clean D / U line to PoC
alternative_else
	dc	civac, x0			// clean & invalidate D / U line
alternative_endif
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__dma_clean_range)
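
/*
 * The alternative above is patched in at boot: on CPUs affected by the
 * ARM64_WORKAROUND_CLEAN_CACHE erratum, "dc cvac" is upgraded to
 * "dc civac", which is at least as strong a clean and additionally
 * invalidates the line.
 */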

/*
 *	__dma_flush_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__dma_flush_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x0, x0, x3			// align start down to line size
1:	dc	civac, x0			// clean & invalidate D / U line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPIPROC(__dma_flush_range)
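
/*
 * Illustrative caller (a sketch; the context is an assumption): the
 * arch DMA code can flush a freshly allocated non-coherent buffer so
 * that no dirty lines linger above the PoC:
 *
 *	__dma_flush_range(ptr, ptr + size);
 */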

/*
 *	__dma_map_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region in bytes
 *	- dir     - DMA direction
 */
ENTRY(__dma_map_area)
	add	x1, x1, x0			// end = start + size
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_range			// device writes: invalidate
	b	__dma_clean_range		// device reads: clean to PoC
ENDPIPROC(__dma_map_area)
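
/*
 * Illustrative C-side call (a sketch, not part of this file), made by
 * the streaming DMA ops when a buffer is handed to a device:
 *
 *	__dma_map_area(phys_to_virt(paddr), size, dir);
 *
 * DMA_FROM_DEVICE only needs stale lines invalidated before the device
 * writes; every other direction must clean dirty lines out to the PoC
 * first so the device reads up-to-date data.
 */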

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region in bytes
 *	- dir     - DMA direction
 */
ENTRY(__dma_unmap_area)
	add	x1, x1, x0			// end = start + size
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_range			// device may have written: invalidate
	ret					// DMA_TO_DEVICE: nothing to do
ENDPIPROC(__dma_unmap_area)
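
/*
 * Illustrative C-side call (a sketch, not part of this file), made
 * when a buffer is taken back from a device:
 *
 *	__dma_unmap_area(phys_to_virt(paddr), size, dir);
 *
 * Nothing needs doing for DMA_TO_DEVICE; for the other directions, any
 * lines speculatively allocated while the device owned the buffer are
 * invalidated.
 */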