/*
 * arch/arm/include/asm/cache.h
 */
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H

#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

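/*
 * Worked example (illustrative, not part of the original header):
 * with CONFIG_ARM_L1_CACHE_SHIFT=6, a common value for ARMv7 cores,
 * L1_CACHE_BYTES evaluates to (1 << 6) == 64, i.e. a 64-byte cache
 * line.
 */
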
/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

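/*
 * Illustrative sketch (not part of the original header): a driver
 * embedding a DMA buffer inside a larger structure cannot rely on
 * kmalloc()'s alignment of the whole object for that inner member,
 * so it aligns the member explicitly. 'struct my_adapter' and its
 * fields are hypothetical names used only for this example:
 *
 *	struct my_adapter {
 *		spinlock_t	lock;
 *		u8		rx_buf[256] __aligned(ARCH_DMA_MINALIGN);
 *	};
 *
 * Aligning rx_buf to a cache-line boundary ensures that cache
 * maintenance on the DMA buffer never touches bytes belonging to
 * the neighbouring 'lock' field.
 */
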
/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif

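/*
 * Illustrative sketch (not part of the original header): the EABI
 * requires 64-bit types to be 8-byte aligned, so a slab object
 * containing a u64 must start on an 8-byte boundary. A hypothetical
 * allocation such as:
 *
 *	struct my_record {		// hypothetical example struct
 *		u64	timestamp;	// EABI: needs 8-byte alignment
 *		u32	flags;
 *	};
 *	struct my_record *r = kmalloc(sizeof(*r), GFP_KERNEL);
 *
 * is only safe to dereference because ARCH_SLAB_MINALIGN forces the
 * slab allocator to hand back 8-byte-aligned pointers here.
 */
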
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

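/*
 * Illustrative usage (not part of the original header): variables
 * that are written rarely but read on hot paths can be tagged so
 * the linker groups them in .data..read_mostly, away from
 * frequently written data, reducing cache-line bouncing between
 * CPUs. 'debug_level' is a hypothetical variable name:
 *
 *	static int debug_level __read_mostly;
 */
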
#endif