#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
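/*
 * Copy data from a page into the user buffers described by an iov_iter,
 * advancing the iterator past whatever was copied.  Returns the number of
 * bytes copied, which may be less than @bytes if a destination address
 * faults part-way through.
 */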
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);
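	/*
	 * Pre-fault the first destination chunk so that the atomic
	 * (non-sleeping) copies below are likely to succeed while the
	 * page is mapped with kmap_atomic().
	 */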
	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
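/*
 * Illustrative caller (a sketch, not part of this file): the generic file
 * read path copies each uptodate page-cache page into the caller's buffers
 * and treats a short copy as a fault, roughly:
 *
 *	ret = copy_page_to_iter(page, offset, nr, iter);
 *	if (ret < nr)
 *		error = -EFAULT;
 */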
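/*
 * Copy from a (possibly multi-segment) iovec into a kernel buffer using
 * only atomic (non-faulting) user copies, stopping at the first fault.
 * Returns the number of bytes actually copied.
 */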
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}
/*
 * Copy as much as we can into the page and return the number of bytes
 * which were successfully copied.  If a fault is encountered then the
 * return value is the number of bytes which were copied before the fault.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
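/*
 * Advance the iterator by @bytes.  For multi-segment iterators this steps
 * over consumed segments (including unlikely zero-length ones) so that
 * i->iov always points at the segment containing the current position.
 */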
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
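/*
 * Illustrative caller pattern (a sketch, not part of this file; see
 * generic_perform_write() in mm/filemap.c for the real loop): prefault
 * the user buffer, then attempt the atomic copy with pagefaults disabled,
 * retrying on a short copy:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		break;
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 *	iov_iter_advance(i, copied);
 */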
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
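/*
 * Return the OR of all the base addresses and lengths the iterator would
 * walk, so a caller (e.g. the direct-I/O code) can test the low bits
 * against an alignment mask in a single pass.
 */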
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	unsigned long res;
	size_t size = i->count;
	size_t n;

	if (!size)
		return 0;

	res = (unsigned long)iov->iov_base + i->iov_offset;
	n = iov->iov_len - i->iov_offset;
	if (n >= size)
		return res | size;
	size -= n;
	res |= n;
	while (size > (++iov)->iov_len) {
		res |= (unsigned long)iov->iov_base | iov->iov_len;
		size -= iov->iov_len;
	}
	res |= (unsigned long)iov->iov_base | size;
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
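/*
 * Initialize an iterator over a user-supplied iovec array.  @direction is
 * READ or WRITE; the REQ_KERNEL tag is OR'ed in when the caller is running
 * under set_fs(KERNEL_DS), i.e. when the "user" pointers are really kernel
 * ones.
 */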
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= REQ_KERNEL;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
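/*
 * Example (a sketch, not from this file): a read(2)-style caller wrapping
 * a single user buffer before handing it to the generic read path:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */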
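/*
 * Pin the user pages backing the current segment, covering at most
 * @maxsize bytes.  *@start receives the offset into the first page.
 * Returns the number of bytes covered by the pinned pages (possibly
 * fewer than requested) or a negative errno if nothing could be pinned.
 */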
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages);
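/*
 * Note: only the current segment is examined, so callers that need the
 * whole iterator pinned are expected to loop, calling iov_iter_advance()
 * between calls -- this is how the direct-I/O path consumes it.
 */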