/*
 *  linux/arch/arm/mm/proc-arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64

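/*
 * The clean/flush loops below walk the D cache by segment and index
 * rather than by address: each pass builds a CP15 index operand with
 * the entry number in bits [31:26] and the segment number in bits
 * [5:4], and both counters are stepped down to zero so every line is
 * visited.  A rough C sketch (illustrative only; cp15_op() is a
 * hypothetical stand-in for the mcr):
 *
 *	for (seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
 *		for (idx = CACHE_DENTRIES - 1; idx >= 0; idx--)
 *			cp15_op((idx << 26) | (seg << 4));
 */
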
	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	mov	pc, lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
	bl	arm940_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/*
 *	flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	There is no efficient way to flush a cache for the specified
 *	address range, so the whole cache is cleaned and invalidated
 *	instead.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	There is no efficient way to invalidate a specified virtual
 *	address range, so the whole D cache is invalidated instead.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_dma_inv_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	There is no efficient way to clean a specified virtual
 *	address range, so the whole D cache is cleaned instead.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_dma_clean_range)
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

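/*
 * Note that cpu_arm940_dcache_clean_area is simply an alias for the
 * whole-cache clean above.  When CONFIG_CPU_DCACHE_WRITETHROUGH is
 * set the clean loop is compiled out: a write-through D cache never
 * holds dirty lines, so only the write buffer drain is needed.
 */
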
/*
 *	dma_flush_range(start, end)
 *
 *	There is no efficient way to clean and invalidate a specified
 *	virtual address range, so the whole D cache is cleaned and
 *	invalidated instead.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
ENDPROC(arm940_dma_map_area)
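
/*
 * dma_map_area() converts (start, size) into (start, end) and then
 * dispatches on the DMA direction.  A rough C equivalent of the
 * branches above (illustrative only; the constants are the generic
 * enum dma_data_direction values):
 *
 *	if (dir == DMA_TO_DEVICE)
 *		arm940_dma_clean_range(start, end);
 *	else if (dir > DMA_TO_DEVICE)		// DMA_FROM_DEVICE
 *		arm940_dma_inv_range(start, end);
 *	else					// DMA_BIDIRECTIONAL
 *		arm940_dma_flush_range(start, end);
 */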

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm940_dma_unmap_area)
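
/*
 * Unmap is a no-op here: the cache maintenance is done entirely at map
 * time, which is sufficient on a core like the ARM940T that does not
 * speculatively prefetch data back into the cache while the device
 * owns the buffer.
 */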

ENTRY(arm940_cache_fns)
	.long	arm940_flush_kern_cache_all
	.long	arm940_flush_user_cache_all
	.long	arm940_flush_user_cache_range
	.long	arm940_coherent_kern_range
	.long	arm940_coherent_user_range
	.long	arm940_flush_kern_dcache_area
	.long	arm940_dma_map_area
	.long	arm940_dma_unmap_area
	.long	arm940_dma_inv_range
	.long	arm940_dma_clean_range
	.long	arm940_dma_flush_range

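/*
 * The table above is consumed through struct cpu_cache_fns, so the
 * order of the entries must match that structure (see
 * arch/arm/include/asm/cacheflush.h); the dma_map_area/dma_unmap_area
 * slots are the per-CPU map/unmap hooks used by the streaming DMA API.
 */
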
	__INIT

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data areas 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction areas 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1			@ shift out one size bit, set flags
	bne	1b				@ repeat until the count is zero
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r0, c6, c1, 1

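/*
 * The loop above converts the RAM size into the MPU region size
 * encoding: starting from 10, the count is bumped once for every
 * shift needed to reduce (size >> 12) to zero, and the result is
 * placed in bits [5:1] of the region register, where a value N
 * selects a region of 2^(N+1) bytes.  For example (hypothetical
 * value), CONFIG_DRAM_SIZE = 8MB gives r1 = 0x800, twelve shifts,
 * r2 = 22 and a region size of 2^23 = 8MB.  The FLASH region below
 * is encoded the same way.
 */
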
	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1			@ shift out one size bit, set flags
	bne	1b				@ repeat until the count is zero
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r0, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Regions 1 and 2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1
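	/*
	 * 0xffff programs the two access-permission bits of all eight
	 * regions to b11 (full read/write access) in both the data and
	 * instruction protection registers.
	 */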

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache
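	/*
	 * The control register value is returned in r0 rather than
	 * written here; as with the other proc-*.S setup routines, the
	 * caller is expected to write it once the rest of the early
	 * initialisation is done.
	 */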

	mov	pc, lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm940_processor_functions, #object
ENTRY(arm940_processor_functions)
	.word	nommu_early_abort
	.word	legacy_pabort
	.word	cpu_arm940_proc_init
	.word	cpu_arm940_proc_fin
	.word	cpu_arm940_reset
	.word	cpu_arm940_do_idle
	.word	cpu_arm940_dcache_clean_area
	.word	cpu_arm940_switch_mm
	.word	0		@ cpu_*_set_pte
	.size	arm940_processor_functions, . - arm940_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm940_name, #object
cpu_arm940_name:
	.asciz	"ARM940T"
	.size	cpu_arm940_name, . - cpu_arm940_name

	.align

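/*
 * The first two words of __arm940_proc_info below are the CPU ID value
 * and mask matched against the main ID register by the early boot code
 * when it looks up the processor type; the branch to __arm940_setup is
 * the per-CPU initialisation hook invoked once a match is found.
 */
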
	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	b	__arm940_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
