/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	int unwritten;

	/*
	 * We are copying between two kernel buffers; if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault) we would have already reported a general protection
	 * fault before the WARN+BUG.
	 */
	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
			(void __user *) src, n);
	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, unwritten))
		BUG();
}

/*
 * If the platform supports machine check recovery, use memcpy_mcsafe()
 * so that a read of a poisoned pmem range returns an error instead of
 * triggering a fatal machine check.
 */
static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
		size_t n)
{
	if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
		return memcpy_mcsafe(dst, (void __force *) src, n);
	memcpy(dst, (void __force *) src, n);
	return 0;
}
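
/*
 * Illustrative sketch, not part of this header: a read path that
 * checks for media errors. A nonzero return means the copy was
 * interrupted by a machine check on a poisoned range; 'buf',
 * 'pmem_addr' and 'len' are hypothetical names.
 *
 *	if (arch_memcpy_from_pmem(buf, pmem_addr, len))
 *		return -EIO;
 *
 * where -EIO reports the media error to the caller.
 */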

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations, this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
	/*
	 * wmb() to 'sfence' all previous writes such that they are
	 * architecturally visible to 'pcommit'. Note that we've
	 * already arranged for pmem writes to avoid the cache via
	 * arch_memcpy_to_pmem().
	 */
	wmb();
	pcommit_sfence();
}
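
/*
 * Illustrative sketch, not part of this header: the intended durable
 * write protocol pairs the non-temporal copy with a drain of the write
 * buffers; 'dst', 'src' and 'len' are hypothetical names.
 *
 *	arch_memcpy_to_pmem(dst, src, len);
 *	arch_wmb_pmem();
 *
 * after which the written data is considered durable on the media.
 */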

/**
 * arch_wb_cache_pmem - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. This function requires explicit ordering with an
 * arch_wmb_pmem() call.
 */
static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vaddr = (void __force *)addr;
	void *vend = vaddr + size;
	void *p;

	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
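
/*
 * Illustrative sketch, not part of this header: when pmem is dirtied
 * through the cache (regular stores rather than non-temporal ones),
 * the affected lines must be written back and then drained before the
 * write is durable; 'pmem_addr' and 'len' are hypothetical names.
 *
 *	arch_wb_cache_pmem(pmem_addr, len);
 *	arch_wmb_pmem();
 */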

/*
 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
 */
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
	return !iter_is_iovec(i);
}

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	void *vaddr = (void __force *)addr;
	size_t len;

	/* TODO: skip the write-back by always using non-temporal stores */
	len = copy_from_iter_nocache(vaddr, bytes, i);

	if (__iter_needs_pmem_wb(i))
		arch_wb_cache_pmem(addr, bytes);

	return len;
}
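
/*
 * Illustrative sketch, not part of this header: a dax-style write path
 * that copies data described by an iov_iter into pmem and then makes
 * it durable; 'i', 'pmem_addr' and 'bytes' are hypothetical names.
 *
 *	size_t copied = arch_copy_from_iter_pmem(pmem_addr, bytes, i);
 *
 *	arch_wmb_pmem();
 *	return copied;
 */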

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	void *vaddr = (void __force *)addr;

	memset(vaddr, 0, size);
	arch_wb_cache_pmem(addr, size);
}
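
/*
 * Illustrative sketch, not part of this header: zeroing a block before
 * it becomes visible. The cache write-back is internal to
 * arch_clear_pmem(), so only the final drain is left to the caller;
 * 'pmem_addr' and 'len' are hypothetical names.
 *
 *	arch_clear_pmem(pmem_addr, len);
 *	arch_wmb_pmem();
 */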

/*
 * Flush (and invalidate) any cache lines covering the range so that
 * stale data, e.g. from a since-cleared media error, is not read back
 * from the cache.
 */
static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
{
	clflush_cache_range((void __force *) addr, size);
}
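
/*
 * Illustrative sketch, not part of this header: after the nvdimm
 * driver clears a media error on a range, drop any stale cached data
 * before reading it back; 'buf', 'pmem_addr' and 'len' are
 * hypothetical names.
 *
 *	arch_invalidate_pmem(pmem_addr, len);
 *	rc = arch_memcpy_from_pmem(buf, pmem_addr, len);
 */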

static inline bool __arch_has_wmb_pmem(void)
{
	/*
	 * We require that wmb() be an 'sfence', which is only guaranteed
	 * on 64-bit builds, and that the cpu supports 'pcommit'.
	 */
	return static_cpu_has(X86_FEATURE_PCOMMIT);
}
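
/*
 * Illustrative sketch, not part of this header: callers are expected
 * to check this capability (via the generic arch_has_wmb_pmem()
 * wrapper) before relying on arch_wmb_pmem() for durability:
 *
 *	if (arch_has_wmb_pmem())
 *		arch_wmb_pmem();
 *	else
 *		... fall back to another flush strategy ...
 */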
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */