/*
 * workqueue.c
 *
 * Userspace RCU library - Userspace workqueues
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * Copyright (c) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/ref.h"
#include "urcu-die.h"

#include "workqueue.h"

#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)

/* Data structure that identifies a workqueue. */

struct urcu_workqueue {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long loop_count;
	void *priv;
	void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct urcu_workqueue_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct urcu_workqueue_completion_work {
	struct urcu_work work;
	struct urcu_workqueue_completion *completion;
};
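
/*
 * A completion acts as a barrier: each queued completion work
 * decrements barrier_count when it executes on a worker thread, and
 * the waiter sleeps on the futex until the count drops to zero. The
 * reference count keeps the structure alive until the last user
 * (waiter or worker) drops its reference.
 */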

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7). The affinity check is only performed once every
 * SET_AFFINITY_CHECK_PERIOD calls to keep its cost off the fast path.
 */
#if HAVE_SCHED_SETAFFINITY
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	cpu_set_t mask;
	int ret;

	if (workqueue->cpu_affinity < 0)
		return 0;
	if (++workqueue->loop_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == workqueue->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(workqueue->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	return 0;
}
#endif

static void futex_wait(int32_t *futex)
{
	/* Read condition before reading futex */
	cmm_smp_mb();
	if (uatomic_read(futex) != -1)
		return;
	while (futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void futex_wake_up(int32_t *futex)
{
	/* Write to condition before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(futex) == -1)) {
		uatomic_set(futex, 0);
		if (futex_async(futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
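
/*
 * Futex protocol tying the two helpers above together: a thread that
 * intends to sleep decrements the futex to -1 (in the callers below),
 * issues a memory barrier, re-checks its wait condition, and only
 * then blocks in FUTEX_WAIT while the value is still -1. A waker that
 * observes -1 resets the value to 0 and issues FUTEX_WAKE. The
 * barriers order the condition accesses against the futex accesses on
 * both sides.
 */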

/* This is the code run by each worker thread. */

static void *workqueue_thread(void *arg)
{
	unsigned long cbcount;
	struct urcu_workqueue *workqueue = (struct urcu_workqueue *) arg;
	int rt = !!(uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_RT);

	if (set_thread_cpu_affinity(workqueue))
		urcu_die(errno);

	if (workqueue->initialize_worker_fct)
		workqueue->initialize_worker_fct(workqueue, workqueue->priv);

	if (!rt) {
		uatomic_dec(&workqueue->futex);
		/* Decrement futex before reading workqueue */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(workqueue))
			urcu_die(errno);

		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourselves from all global lists, and don't
			 * process any work item. The work queue may
			 * still be non-empty though.
			 */
			if (workqueue->worker_before_pause_fct)
				workqueue->worker_before_pause_fct(workqueue, workqueue->priv);
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSED);
			while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			if (workqueue->worker_after_resume_fct)
				workqueue->worker_after_resume_fct(workqueue, workqueue->priv);
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &workqueue->cbs_head, &workqueue->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			if (workqueue->grace_period_fct)
				workqueue->grace_period_fct(workqueue, workqueue->priv);
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct urcu_work *uwp;

				uwp = caa_container_of(cbs,
					struct urcu_work, next);
				uwp->func(uwp);
				cbcount++;
			}
			uatomic_sub(&workqueue->qlen, cbcount);
		}
		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_STOP)
			break;
		if (workqueue->worker_before_wait_fct)
			workqueue->worker_before_wait_fct(workqueue, workqueue->priv);
		if (!rt) {
			if (cds_wfcq_empty(&workqueue->cbs_head,
					&workqueue->cbs_tail)) {
				futex_wait(&workqueue->futex);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&workqueue->futex);
				/*
				 * Decrement futex before reading
				 * the work queue.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		if (workqueue->worker_after_wake_up_fct)
			workqueue->worker_after_wake_up_fct(workqueue, workqueue->priv);
	}
	if (!rt) {
		/*
		 * Read the work queue before writing to the futex.
		 */
		cmm_smp_mb();
		uatomic_set(&workqueue->futex, 0);
	}
	if (workqueue->finalize_worker_fct)
		workqueue->finalize_worker_fct(workqueue, workqueue->priv);
	return NULL;
}

struct urcu_workqueue *urcu_workqueue_create(unsigned long flags,
		int cpu_affinity, void *priv,
		void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv))
{
	struct urcu_workqueue *workqueue;
	int ret;

	workqueue = malloc(sizeof(*workqueue));
	if (workqueue == NULL)
		urcu_die(errno);
	memset(workqueue, '\0', sizeof(*workqueue));
	cds_wfcq_init(&workqueue->cbs_head, &workqueue->cbs_tail);
	workqueue->qlen = 0;
	workqueue->futex = 0;
	workqueue->flags = flags;
	workqueue->priv = priv;
	workqueue->grace_period_fct = grace_period_fct;
	workqueue->initialize_worker_fct = initialize_worker_fct;
	workqueue->finalize_worker_fct = finalize_worker_fct;
	workqueue->worker_before_wait_fct = worker_before_wait_fct;
	workqueue->worker_after_wake_up_fct = worker_after_wake_up_fct;
	workqueue->worker_before_pause_fct = worker_before_pause_fct;
	workqueue->worker_after_resume_fct = worker_after_resume_fct;
	workqueue->cpu_affinity = cpu_affinity;
	workqueue->loop_count = 0;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret) {
		urcu_die(ret);
	}
	return workqueue;
}
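
/*
 * Note (from the worker code above): every hook passed here may be
 * NULL; each one is checked before being invoked. A negative
 * cpu_affinity disables the affinity handling altogether.
 */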

static void wake_worker_thread(struct urcu_workqueue *workqueue)
{
	if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_WORKQUEUE_RT))
		futex_wake_up(&workqueue->futex);
}

static int urcu_workqueue_destroy_worker(struct urcu_workqueue *workqueue)
{
	int ret;
	void *retval;

	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_STOP);
	wake_worker_thread(workqueue);

	ret = pthread_join(workqueue->tid, &retval);
	if (ret) {
		urcu_die(ret);
	}
	if (retval != NULL) {
		urcu_die(EINVAL);
	}
	workqueue->flags &= ~URCU_WORKQUEUE_STOP;
	workqueue->tid = 0;
	return 0;
}

void urcu_workqueue_destroy(struct urcu_workqueue *workqueue)
{
	if (workqueue == NULL) {
		return;
	}
	if (urcu_workqueue_destroy_worker(workqueue)) {
		urcu_die(errno);
	}
	assert(cds_wfcq_empty(&workqueue->cbs_head, &workqueue->cbs_tail));
	free(workqueue);
}

void urcu_workqueue_queue_work(struct urcu_workqueue *workqueue,
		struct urcu_work *work,
		void (*func)(struct urcu_work *work))
{
	cds_wfcq_node_init(&work->next);
	work->func = func;
	cds_wfcq_enqueue(&workqueue->cbs_head, &workqueue->cbs_tail, &work->next);
	uatomic_inc(&workqueue->qlen);
	wake_worker_thread(workqueue);
}
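
/*
 * Usage sketch for the functions above (the "my_work" structure,
 * do_my_work(), use() and the NULL hook arguments are illustrative
 * only, not part of this API):
 *
 *	struct my_work {
 *		struct urcu_work work;
 *		int payload;
 *	};
 *
 *	static void do_my_work(struct urcu_work *work)
 *	{
 *		struct my_work *w = caa_container_of(work,
 *				struct my_work, work);
 *
 *		use(w->payload);
 *		free(w);
 *	}
 *
 *	struct urcu_workqueue *wq = urcu_workqueue_create(0, -1, NULL,
 *			NULL, NULL, NULL, NULL, NULL, NULL, NULL);
 *	struct my_work *w = malloc(sizeof(*w));
 *
 *	w->payload = 42;
 *	urcu_workqueue_queue_work(wq, &w->work, do_my_work);
 */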

static
void free_completion(struct urcu_ref *ref)
{
	struct urcu_workqueue_completion *completion;

	completion = caa_container_of(ref, struct urcu_workqueue_completion, ref);
	free(completion);
}

static
void _urcu_workqueue_wait_complete(struct urcu_work *work)
{
	struct urcu_workqueue_completion_work *completion_work;
	struct urcu_workqueue_completion *completion;

	completion_work = caa_container_of(work, struct urcu_workqueue_completion_work, work);
	completion = completion_work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		futex_wake_up(&completion->futex);
	urcu_ref_put(&completion->ref, free_completion);
	free(completion_work);
}

struct urcu_workqueue_completion *urcu_workqueue_create_completion(void)
{
	struct urcu_workqueue_completion *completion;

	completion = calloc(1, sizeof(*completion));
	if (!completion)
		urcu_die(errno);
	urcu_ref_set(&completion->ref, 1);
	completion->barrier_count = 0;
	return completion;
}

void urcu_workqueue_destroy_completion(struct urcu_workqueue_completion *completion)
{
	urcu_ref_put(&completion->ref, free_completion);
}

void urcu_workqueue_wait_completion(struct urcu_workqueue_completion *completion)
{
	/* Wait for all queued completion work to execute. */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		futex_wait(&completion->futex);
	}
}

void urcu_workqueue_queue_completion(struct urcu_workqueue *workqueue,
		struct urcu_workqueue_completion *completion)
{
	struct urcu_workqueue_completion_work *work;

	work = calloc(1, sizeof(*work));
	if (!work)
		urcu_die(errno);
	work->completion = completion;
	urcu_ref_get(&completion->ref);
	uatomic_inc(&completion->barrier_count);
	urcu_workqueue_queue_work(workqueue, &work->work, _urcu_workqueue_wait_complete);
}

/*
 * Wait for all in-flight work to complete execution.
 */
void urcu_workqueue_flush_queued_work(struct urcu_workqueue *workqueue)
{
	struct urcu_workqueue_completion *completion;

	completion = urcu_workqueue_create_completion();
	if (!completion)
		urcu_die(ENOMEM);
	urcu_workqueue_queue_completion(workqueue, completion);
	urcu_workqueue_wait_completion(completion);
	urcu_workqueue_destroy_completion(completion);
}
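
/*
 * The same completion can also act as a barrier across several
 * workqueues at once (a sketch; "wq1" and "wq2" are illustrative):
 *
 *	completion = urcu_workqueue_create_completion();
 *	urcu_workqueue_queue_completion(wq1, completion);
 *	urcu_workqueue_queue_completion(wq2, completion);
 *	urcu_workqueue_wait_completion(completion);
 *	urcu_workqueue_destroy_completion(completion);
 */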

/* To be used in a before-fork handler. */
void urcu_workqueue_pause_worker(struct urcu_workqueue *workqueue)
{
	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSE);
	cmm_smp_mb__after_uatomic_or();
	wake_worker_thread(workqueue);

	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) == 0)
		(void) poll(NULL, 0, 1);
}

/* To be used in an after-fork parent handler. */
void urcu_workqueue_resume_worker(struct urcu_workqueue *workqueue)
{
	uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSE);
	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) != 0)
		(void) poll(NULL, 0, 1);
}

void urcu_workqueue_create_worker(struct urcu_workqueue *workqueue)
{
	int ret;

	/* Clear workqueue state from parent. */
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSED;
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSE;
	workqueue->tid = 0;
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret) {
		urcu_die(ret);
	}
}
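
/*
 * Typical fork integration, tying the three functions above together
 * (a sketch, assuming a single global workqueue "my_wq"; the handler
 * names are illustrative):
 *
 *	static void before_fork(void)
 *	{
 *		urcu_workqueue_pause_worker(my_wq);
 *	}
 *
 *	static void after_fork_parent(void)
 *	{
 *		urcu_workqueue_resume_worker(my_wq);
 *	}
 *
 *	static void after_fork_child(void)
 *	{
 *		urcu_workqueue_create_worker(my_wq);
 *	}
 *
 *	ret = pthread_atfork(before_fork, after_fork_parent, after_fork_child);
 */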