/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>

/*
 * Estimate expected accuracy in ns from a timespec64.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
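
/*
 * Worked example (editorial note, not part of the original file): for a
 * remaining timeout of 2.000000500 s and a non-"nice" task
 * (divfactor = 1000):
 *
 *	slack = 500 / 1000 + 2 * (NSEC_PER_SEC / 1000)
 *	      = 0 + 2,000,000 ns = 2 ms
 *
 * i.e. 0.1% of the timeout. A "nice" task (divfactor = 200) gets 10 ms
 * here, and any non-nice timeout above 100 s hits the MAX_SLACK cap.
 */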

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}

struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

104 | /* | |
105 | * Ok, Peter made a complicated, but straightforward multiple_wait() function. | |
106 | * I have rewritten this, taking some shortcuts: This code may not be easy to | |
107 | * follow, but it should be free of race-conditions, and it's practical. If you | |
108 | * understand what I'm doing here, then you understand how the linux | |
109 | * sleep/wakeup mechanism works. | |
110 | * | |
111 | * Two very simple procedures, poll_wait() and poll_freewait() make all the | |
112 | * work. poll_wait() is an inline-function defined in <linux/poll.h>, | |
113 | * as all select/poll functions have to call it to add an entry to the | |
114 | * poll table. | |
115 | */ | |
75c96f85 AB |
116 | static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, |
117 | poll_table *p); | |
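
/*
 * Illustrative sketch (editorial, not part of this file): a driver's
 * ->poll method cooperates with __pollwait() via poll_wait(). Assuming a
 * hypothetical device with its own wait queue and a ready flag:
 *
 *	static unsigned int mydev_poll(struct file *file, poll_table *wait)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		poll_wait(file, &dev->waitq, wait);
 *		return dev->data_ready ? (POLLIN | POLLRDNORM) : 0;
 *	}
 *
 * poll_wait() calls back into __pollwait() below (when _qproc is set),
 * which queues an entry on dev->waitq; the driver's later wake_up() on
 * that queue then lands in pollwake().
 */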

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

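/*
 * Editorial illustration (not in the original): the barrier pairing
 * between pollwake() and poll_schedule_timeout() is
 *
 *	waker				poller
 *	-----				------
 *	make event data visible		smp_store_mb(triggered, 0)
 *	smp_wmb()			re-check all fds
 *	pwq->triggered = 1		see triggered == 1, skip sleep
 *
 * so a wakeup racing with the next iteration's event check cannot be
 * lost: either the re-check observes the event, or triggered is still
 * set and the sleep is skipped.
 */
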
/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}
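
/*
 * Editorial example (not in the original): a relative timeout of 1.5 s
 * arrives here as sec = 1, nsec = 500000000; on success *to holds the
 * absolute expiry, i.e. the current monotonic time plus 1.5 s. A call
 * with sec = 0, nsec = 1000000000 is rejected as non-normalized.
 */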

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts64;
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts64);
	rts64 = timespec64_sub(*end_time, rts64);
	if (rts64.tv_sec < 0)
		rts64.tv_sec = rts64.tv_nsec = 0;

	rts = timespec64_to_timespec(rts64);

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts64.tv_sec;
		rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
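
/*
 * Editorial example (not in the original): with n = 10 and 64-bit
 * longs, the first mask is ~(~0UL << 10) = 0x3ff, so only bits 0..9 of
 * the single (partial) word are examined. The result is one more than
 * the highest fd set in any of the three input sets, or -EBADF if a
 * set bit names a descriptor that is not actually open.
 */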

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

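/*
 * Editorial note (not in the original): the _key set here is consumed
 * by pollwake() above. E.g. an fd watched only for readability gets
 * _key = POLLEX_SET | POLLIN_SET, so a pure-POLLOUT wakeup from the
 * driver (passed in @key) is filtered out without waking the poller.
 */
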
int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

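/*
 * Editorial note (not in the original): on the first pass through the
 * loop above, every ->poll() call also registers a waiter via
 * wait->_qproc; as soon as any fd reports an event (or the first scan
 * completes), _qproc is cleared so later iterations only re-check
 * readiness without queueing duplicate wait entries.
 */
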
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

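/*
 * Editorial example (not in the original): all six bitmaps live in one
 * allocation. On a 64-bit arch with n = 100, FDS_BYTES(100) = 16, so
 * the six maps need 96 bytes and fit in stack_fds (SELECT_STACK_ALLOC
 * is 256 in <linux/poll.h>); only when 6 * size outgrows the on-stack
 * buffer does the kmalloc() path above trigger.
 */
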
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 ts64, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;
		ts64 = timespec_to_timespec64(ts);

		to = &end_time;
		if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
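
/*
 * Illustrative userspace view (editorial, not part of this file): a
 * libc wrapper packs the mask pointer and its size behind the sixth
 * argument, roughly:
 *
 *	struct { const sigset_t *ss; size_t ss_len; } arg =
 *		{ mask, sizeof(*mask) };
 *	syscall(__NR_pselect6, nfds, rfds, wfds, efds, ts, &arg);
 *
 * which matches the two __get_user() reads above.
 */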

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}

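/*
 * Editorial example (not in the original): a pollfd with fd = -1 always
 * comes back with revents = 0 (negative fds are simply skipped), while
 * a non-open fd yields POLLNVAL; POLLERR and POLLHUP are reported even
 * when the caller did not ask for them.
 */
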
static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

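/*
 * Editorial example (not in the original): on a typical 64-bit arch
 * (8-byte struct pollfd, 16-byte struct poll_list header, 4 KiB pages,
 * POLL_STACK_ALLOC = 256), N_STACK_PPS is 30 and POLLFD_PER_PAGE is
 * 510; polling 1000 fds therefore uses the on-stack block for the
 * first 30 entries plus two kmalloc'd chunks of 510 and 460.
 */
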
static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
				sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}