/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm
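
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * typical critical section masks IRQs around a few instructions.
 *
 *	disable_irq			// set DAIF.I: mask IRQs
 *	// ... code that must not be interrupted ...
 *	enable_irq			// clear DAIF.I: unmask IRQs
 */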

/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1		// clear MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1		// set MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
9990:
	.endm
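
/*
 * Usage sketch (illustrative only): toggle hardware single-step around a
 * context switch, based on the task's TIF_SINGLESTEP flag held in, say, x19.
 *
 *	disable_step_tsk x19, x20	// leaving the stepped task
 *	// ... switch to the next task ...
 *	enable_step_tsk x19, x20	// entering a stepped task
 */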

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm
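
/*
 * For reference (an architectural fact, not original text): the 4-bit
 * immediate of daifset/daifclr maps to D=bit 3, A=bit 2, I=bit 1, F=bit 0,
 * so #(8 | 2) clears the Debug and IRQ masks in a single write.
 */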

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
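
/*
 * Usage sketch (illustrative only): wrap a user-space access so that a
 * fault at the 9999 label diverts to a local fixup label instead of oopsing.
 *
 *	USER(9f, ldr x2, [x1])		// load that may fault
 *	// ... success path ...
 * 9:	// fixup: runs if the access above faulted
 */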

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
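
/*
 * Usage sketch (illustrative only): merge a 64-bit value passed as a pair
 * of 32-bit registers. The same call site works for both endiannesses
 * because the macro's parameter order swaps under BE.
 *
 *	regs_to_64 x2, x0, x1		// LE: x2 = x0 | (x1 << 32)
 */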

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional scratch register to be used if <dst> == sp, which
	 *       is not allowed in an adrp instruction
	 */
	.macro	adr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.else
	adrp	\tmp, \sym
	add	\dst, \tmp, :lo12:\sym
	.endif
	.endm
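
/*
 * Usage sketch (illustrative only): take the address of a kernel symbol
 * without an absolute literal, so the code stays position independent.
 *
 *	adr_l	x0, vectors		// x0 = runtime address of 'vectors'
 */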

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
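
/*
 * Usage sketch (illustrative only): PC-relative load/store of a global.
 * ldr_l needs the scratch register only when the destination is a w
 * register; str_l always needs one, since the source must be preserved.
 *
 *	ldr_l	w1, idmap_t0sz, x2	// w1 = idmap_t0sz, x2 clobbered
 *	str_l	x0, memstart_addr, x3	// memstart_addr = x0, x3 clobbered
 */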

	/*
	 * @sym: The name of the per-cpu variable
	 * @reg: Result of per_cpu(sym, smp_processor_id())
	 * @tmp: scratch register
	 */
	.macro this_cpu_ptr, sym, reg, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el1
	add	\reg, \reg, \tmp
	.endm
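
/*
 * Note (background, not original text): TPIDR_EL1 holds this CPU's per-cpu
 * offset, so the pointer is simply the variable's link address plus that
 * offset, e.g.:
 *
 *	this_cpu_ptr irq_stack, x25, x26	// x25 = this CPU's irq_stack
 */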

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register.
 */
	.macro	dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
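
/*
 * Worked example (derived from the ARM ARM's CTR_EL0 layout, not original
 * text): DminLine (bits [19:16]) is log2 of the line size in 4-byte words.
 * For DminLine = 4: reg = 4 << 4 = 64, i.e. a 64-byte D-cache line.
 */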

/*
 * icache_line_size - get the minimum I-cache line size from the CTR register.
 */
	.macro	icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2		// align down to line start
9998:	dc	\op, \kaddr
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
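
/*
 * Usage sketch (illustrative only): clean+invalidate a buffer to the point
 * of coherency, e.g. before handing it to a non-coherent DMA master.
 * x0 = start address, x1 = size in bytes; x0-x3 are all corrupted.
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */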

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm
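
/*
 * Note (background, not original text): the signed extract means both 0x0
 * (no PMU) and 0xf (sign-extended to -1, IMPLEMENTATION DEFINED) compare
 * as less than 1, so the msr only runs when an architected PMU is present.
 */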

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
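
/*
 * Design note (inferred, not original text): stnp is a non-temporal store,
 * hinting that the destination lines need not be kept in cache; a freshly
 * copied page is rarely re-read immediately. Usage sketch:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */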

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)
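
/*
 * Note (background, not original text): this emits a __pi_<x> alias for
 * <x>, so early boot code that runs before the kernel mapping is up can
 * call the function through the explicitly position-independent name.
 */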

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
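
/*
 * Worked example (illustrative only):
 *
 *	mov_q	x0, 0x7fff0000		// (\val >> 31) == 0: 2 insns,
 *					// movz x0, #0x7fff, lsl #16
 *					// movk x0, #0x0
 *	mov_q	x1, 0xffff000000000000	// full 64-bit constant: 4 insns
 */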

#endif	/* __ASM_ASSEMBLER_H */