arch/x86/include/asm/pmem.h
/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	int unwritten;

	/*
	 * We are copying between two kernel buffers; if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault) we would have already reported a general protection fault
	 * before the WARN+BUG.
	 */
	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
			(void __user *) src, n);
	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, unwritten))
		BUG();
}
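
/*
 * Illustrative pairing (sketch only; 'pmem', 'buf' and 'len' stand in
 * for a driver's __pmem-annotated mapping and source buffer):
 *
 *	arch_memcpy_to_pmem(pmem, buf, len);
 *	arch_wmb_pmem();	<- makes the non-temporal stores durable
 */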

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
	/*
	 * wmb() to 'sfence' all previous writes such that they are
	 * architecturally visible to 'pcommit'.  Note that we've
	 * already arranged for pmem writes to avoid the cache via
	 * arch_memcpy_to_pmem().
	 */
	wmb();
	pcommit_sfence();
}
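
/*
 * Illustrative instruction stream for a durable write, assuming wmb()
 * expands to 'sfence' on x86-64 and pcommit_sfence() to
 * 'pcommit; sfence' (see asm/special_insns.h):
 *
 *	movnti ...	<- arch_memcpy_to_pmem()
 *	sfence		<- wmb(): order the non-temporal stores
 *	pcommit		<- flush memory controller write buffers
 *	sfence		<- order the pcommit itself
 */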

/**
 * arch_wb_cache_pmem - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction.  This function requires explicit ordering with an
 * arch_wmb_pmem() call.
 */
static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vaddr = (void __force *)addr;
	void *vend = vaddr + size;
	void *p;

	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
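
/*
 * Worked example (sketch, assuming a 64-byte cache line): for
 * addr = 0x1005 and size = 200, clflush_mask = 63, so the loop
 * rounds down to 0x1000 and issues clwb on 0x1000, 0x1040, 0x1080
 * and 0x10c0, covering the partial first and last lines.
 */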

/*
 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
 */
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
	return !iter_is_iovec(i);
}
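
/*
 * Sketch of the resulting behavior: ITER_IOVEC -> false (the copy
 * already used non-temporal stores), ITER_BVEC / ITER_KVEC -> true
 * (cached copy, so an explicit write-back is needed).
 */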

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	void *vaddr = (void __force *)addr;
	size_t len;

	/* TODO: skip the write-back by always using non-temporal stores */
	len = copy_from_iter_nocache(vaddr, bytes, i);

	if (__iter_needs_pmem_wb(i))
		arch_wb_cache_pmem(addr, bytes);

	return len;
}
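
/*
 * Illustrative caller (sketch only, mirroring a dax-style write path;
 * 'pmem', 'bytes' and 'i' are assumed to be set up by the caller):
 *
 *	len = arch_copy_from_iter_pmem(pmem, bytes, i);
 *	...
 *	arch_wmb_pmem();	<- commit the copies once the loop is done
 */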

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	void *vaddr = (void __force *)addr;

	memset(vaddr, 0, size);
	arch_wb_cache_pmem(addr, size);
}

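/**
 * arch_invalidate_pmem - flush and invalidate a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to invalidate
 *
 * Flush and invalidate any cache lines covering the range so that
 * subsequent reads are satisfied from persistent memory media.
 */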
static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
{
	clflush_cache_range((void __force *) addr, size);
}

static inline bool __arch_has_wmb_pmem(void)
{
	/*
	 * We require that wmb() be an 'sfence', which is only guaranteed
	 * on 64-bit builds.
	 */
	return static_cpu_has(X86_FEATURE_PCOMMIT);
}
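
/*
 * Illustrative gate in a caller (sketch only; the generic wrappers
 * are assumed to live in include/linux/pmem.h):
 *
 *	if (arch_has_wmb_pmem())
 *		wmb_pmem();
 */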
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */