1 #include <linux/export.h>
2 #include <linux/uio.h>
3 #include <linux/pagemap.h>
5 size_t copy_page_to_iter(struct page
*page
, size_t offset
, size_t bytes
,
8 size_t skip
, copy
, left
, wanted
;
9 const struct iovec
*iov
;
13 if (unlikely(bytes
> i
->count
))
22 buf
= iov
->iov_base
+ skip
;
23 copy
= min(bytes
, iov
->iov_len
- skip
);
25 if (!fault_in_pages_writeable(buf
, copy
)) {
26 kaddr
= kmap_atomic(page
);
27 from
= kaddr
+ offset
;
29 /* first chunk, usually the only one */
30 left
= __copy_to_user_inatomic(buf
, from
, copy
);
36 while (unlikely(!left
&& bytes
)) {
39 copy
= min(bytes
, iov
->iov_len
);
40 left
= __copy_to_user_inatomic(buf
, from
, copy
);
50 offset
= from
- kaddr
;
53 copy
= min(bytes
, iov
->iov_len
- skip
);
55 /* Too bad - revert to non-atomic kmap */
57 from
= kaddr
+ offset
;
58 left
= __copy_to_user(buf
, from
, copy
);
63 while (unlikely(!left
&& bytes
)) {
66 copy
= min(bytes
, iov
->iov_len
);
67 left
= __copy_to_user(buf
, from
, copy
);
75 i
->count
-= wanted
- bytes
;
76 i
->nr_segs
-= iov
- i
->iov
;
79 return wanted
- bytes
;
81 EXPORT_SYMBOL(copy_page_to_iter
);
83 static size_t __iovec_copy_from_user_inatomic(char *vaddr
,
84 const struct iovec
*iov
, size_t base
, size_t bytes
)
86 size_t copied
= 0, left
= 0;
89 char __user
*buf
= iov
->iov_base
+ base
;
90 int copy
= min(bytes
, iov
->iov_len
- base
);
93 left
= __copy_from_user_inatomic(vaddr
, buf
, copy
);
102 return copied
- left
;
106 * Copy as much as we can into the page and return the number of bytes which
107 * were successfully copied. If a fault is encountered then return the number of
108 * bytes which were copied.
110 size_t iov_iter_copy_from_user_atomic(struct page
*page
,
111 struct iov_iter
*i
, unsigned long offset
, size_t bytes
)
116 kaddr
= kmap_atomic(page
);
117 if (likely(i
->nr_segs
== 1)) {
119 char __user
*buf
= i
->iov
->iov_base
+ i
->iov_offset
;
120 left
= __copy_from_user_inatomic(kaddr
+ offset
, buf
, bytes
);
121 copied
= bytes
- left
;
123 copied
= __iovec_copy_from_user_inatomic(kaddr
+ offset
,
124 i
->iov
, i
->iov_offset
, bytes
);
126 kunmap_atomic(kaddr
);
130 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic
);
132 void iov_iter_advance(struct iov_iter
*i
, size_t bytes
)
134 BUG_ON(i
->count
< bytes
);
136 if (likely(i
->nr_segs
== 1)) {
137 i
->iov_offset
+= bytes
;
140 const struct iovec
*iov
= i
->iov
;
141 size_t base
= i
->iov_offset
;
142 unsigned long nr_segs
= i
->nr_segs
;
145 * The !iov->iov_len check ensures we skip over unlikely
146 * zero-length segments (without overruning the iovec).
148 while (bytes
|| unlikely(i
->count
&& !iov
->iov_len
)) {
151 copy
= min(bytes
, iov
->iov_len
- base
);
152 BUG_ON(!i
->count
|| i
->count
< copy
);
156 if (iov
->iov_len
== base
) {
163 i
->iov_offset
= base
;
164 i
->nr_segs
= nr_segs
;
167 EXPORT_SYMBOL(iov_iter_advance
);
170 * Fault in the first iovec of the given iov_iter, to a maximum length
171 * of bytes. Returns 0 on success, or non-zero if the memory could not be
172 * accessed (ie. because it is an invalid address).
174 * writev-intensive code may want this to prefault several iovecs -- that
175 * would be possible (callers must not rely on the fact that _only_ the
176 * first iovec will be faulted with the current implementation).
178 int iov_iter_fault_in_readable(struct iov_iter
*i
, size_t bytes
)
180 char __user
*buf
= i
->iov
->iov_base
+ i
->iov_offset
;
181 bytes
= min(bytes
, i
->iov
->iov_len
- i
->iov_offset
);
182 return fault_in_pages_readable(buf
, bytes
);
184 EXPORT_SYMBOL(iov_iter_fault_in_readable
);
187 * Return the count of just the current iov_iter segment.
189 size_t iov_iter_single_seg_count(const struct iov_iter
*i
)
191 const struct iovec
*iov
= i
->iov
;
195 return min(i
->count
, iov
->iov_len
- i
->iov_offset
);
197 EXPORT_SYMBOL(iov_iter_single_seg_count
);
199 unsigned long iov_iter_alignment(const struct iov_iter
*i
)
201 const struct iovec
*iov
= i
->iov
;
203 size_t size
= i
->count
;
209 res
= (unsigned long)iov
->iov_base
+ i
->iov_offset
;
210 n
= iov
->iov_len
- i
->iov_offset
;
215 while (size
> (++iov
)->iov_len
) {
216 res
|= (unsigned long)iov
->iov_base
| iov
->iov_len
;
217 size
-= iov
->iov_len
;
219 res
|= (unsigned long)iov
->iov_base
| size
;
222 EXPORT_SYMBOL(iov_iter_alignment
);
224 void iov_iter_init(struct iov_iter
*i
, int direction
,
225 const struct iovec
*iov
, unsigned long nr_segs
,
228 /* It will get better. Eventually... */
229 if (segment_eq(get_fs(), KERNEL_DS
))
230 direction
|= REQ_KERNEL
;
233 i
->nr_segs
= nr_segs
;
237 EXPORT_SYMBOL(iov_iter_init
);