nd_blk: change aperture mapping from WC to WB
arch/x86/include/asm/pmem.h
/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#define ARCH_MEMREMAP_PMEM MEMREMAP_WB

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	int unwritten;

	/*
	 * We are copying between two kernel buffers; if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault), we would have already reported a general protection
	 * fault before the WARN+BUG.
	 */
	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
			(void __user *) src, n);
	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, unwritten))
		BUG();
}

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
	/*
	 * wmb() to 'sfence' all previous writes such that they are
	 * architecturally visible to 'pcommit'. Note that we've already
	 * arranged for pmem writes to avoid the cache via
	 * arch_memcpy_to_pmem().
	 */
	wmb();
	pcommit_sfence();
}
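
/*
 * Illustrative sketch, not part of the original header: the intended
 * calling pattern is one or more arch_memcpy_to_pmem() copies followed
 * by a single arch_wmb_pmem() to make them durable. All names below
 * are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_durable_write(void __pmem *pmem, const void *buf,
		size_t len)
{
	arch_memcpy_to_pmem(pmem, buf, len);	/* non-temporal copy */
	arch_wmb_pmem();			/* make the writes durable */
}
#endif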

/**
 * __arch_wb_cache_pmem - write back a cache range with CLWB
 * @vaddr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. This function requires explicit ordering with an
 * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
 */
static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = vaddr + size;
	void *p;

	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
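
/*
 * Worked example, assuming a 64-byte cache line (not part of the
 * original header): for vaddr = 0x1008 and size = 0x70, clflush_mask
 * is 0x3f, so the loop rounds the start down to 0x1000 and issues
 * clwb on 0x1000 and 0x1040; the next candidate, 0x1080, is >= vend
 * (0x1078), so two write-backs cover the entire unaligned range.
 */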

/*
 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
 */
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
	return iter_is_iovec(i) == false;
}

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	void *vaddr = (void __force *)addr;
	size_t len;

	/* TODO: skip the write-back by always using non-temporal stores */
	len = copy_from_iter_nocache(vaddr, bytes, i);

	if (__iter_needs_pmem_wb(i))
		__arch_wb_cache_pmem(vaddr, bytes);

	return len;
}
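
/*
 * Illustrative sketch, not part of the original header: draining a
 * bvec-backed iterator (e.g. bio pages) into pmem. Since the nocache
 * copy above only streams iovecs past the cache, the bvec path relies
 * on the __arch_wb_cache_pmem() fallback. All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static size_t example_copy_bio_to_pmem(void __pmem *pmem, size_t len,
		struct iov_iter *iter)
{
	size_t copied = arch_copy_from_iter_pmem(pmem, len, iter);

	/* flushed from the cpu cache, but not durable until this: */
	arch_wmb_pmem();
	return copied;
}
#endif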

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	void *vaddr = (void __force *)addr;

	/* TODO: implement the zeroing via non-temporal writes */
	if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
		clear_page(vaddr);
	else
		memset(vaddr, 0, size);

	__arch_wb_cache_pmem(vaddr, size);
}
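
/*
 * Illustrative sketch, not part of the original header: zeroing one
 * 512-byte sector of pmem. A page-sized, page-aligned clear would take
 * the clear_page() fast path instead; either way the range still needs
 * arch_wmb_pmem() for durability. All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_zero_sector(void __pmem *pmem, sector_t sector)
{
	arch_clear_pmem(pmem + (sector << 9), 512);
	arch_wmb_pmem();
}
#endif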

static inline bool arch_has_wmb_pmem(void)
{
#ifdef CONFIG_X86_64
	/*
	 * We require that wmb() be an 'sfence', which is only guaranteed
	 * on 64-bit builds.
	 */
	return static_cpu_has(X86_FEATURE_PCOMMIT);
#else
	return false;
#endif
}
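
/*
 * Illustrative sketch, not part of the original header: callers are
 * expected to gate arch_wmb_pmem() on arch_has_wmb_pmem(), since
 * pcommit_sfence() is only meaningful when the cpu advertises
 * X86_FEATURE_PCOMMIT. Hypothetical example:
 */
#if 0	/* example only, not compiled */
static void example_flush(void)
{
	if (arch_has_wmb_pmem())
		arch_wmb_pmem();	/* durable on pmem media */
	/* else: no arch-level durability guarantee is available */
}
#endif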

#endif /* CONFIG_ARCH_HAS_PMEM_API */

#endif /* __ASM_X86_PMEM_H__ */