/*
 * workqueue.c
 *
 * Userspace RCU library - Userspace workqueues
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * Copyright (c) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/ref.h"
#include "urcu-die.h"

#include "workqueue.h"
#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)
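/*
 * With a period of 256, set_thread_cpu_affinity() below only pays the
 * urcu_sched_getcpu() cost once every 256 calls: intermediate calls
 * bail out early on the loop_count mask test.
 */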
/* Data structure that identifies a workqueue. */

struct urcu_workqueue {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires to touch the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen;		/* maintained for debugging. */
	pthread_t tid;			/* worker thread id. */
	int cpu_affinity;		/* worker thread cpu affinity. */
	unsigned long loop_count;
	void *priv;
	void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv);
	void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct urcu_workqueue_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct urcu_workqueue_completion_work {
	struct urcu_work work;
	struct urcu_workqueue_completion *completion;
};
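/*
 * A completion behaves as a reference-counted barrier: each queued
 * completion work increments barrier_count and holds a reference on
 * the completion, and the waiter sleeps on the completion futex until
 * barrier_count drops back to zero.
 */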
/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#if HAVE_SCHED_SETAFFINITY
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	cpu_set_t mask;
	int ret;

	if (workqueue->cpu_affinity < 0)
		return 0;
	if (++workqueue->loop_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == workqueue->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(workqueue->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else /* #if HAVE_SCHED_SETAFFINITY */
static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
{
	return 0;
}
#endif /* #else #if HAVE_SCHED_SETAFFINITY */
static void futex_wait(int32_t *futex)
{
	/* Read condition before read futex */
	cmm_smp_mb();
	if (uatomic_read(futex) != -1)
		return;
	while (futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void futex_wake_up(int32_t *futex)
{
	/* Write to condition before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(futex) == -1)) {
		uatomic_set(futex, 0);
		if (futex_async(futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
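/*
 * Sketch of the protocol implemented by futex_wait()/futex_wake_up()
 * above: the futex word is 0 while the worker is active and -1 when a
 * waiter may be sleeping. A waiter publishes -1 before sleeping; a
 * waker that observes -1 resets the word to 0 before issuing
 * FUTEX_WAKE, so the wake-up syscall is only paid when someone may
 * actually be asleep.
 */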
/* This is the code run by each worker thread. */

static void *workqueue_thread(void *arg)
{
	unsigned long cbcount;
	struct urcu_workqueue *workqueue = (struct urcu_workqueue *) arg;
	int rt = !!(uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_RT);

	if (set_thread_cpu_affinity(workqueue))
		urcu_die(errno);

	if (workqueue->initialize_worker_fct)
		workqueue->initialize_worker_fct(workqueue, workqueue->priv);

	if (!rt) {
		uatomic_dec(&workqueue->futex);
		/* Decrement futex before reading workqueue */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(workqueue))
			urcu_die(errno);

		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			if (workqueue->worker_before_pause_fct)
				workqueue->worker_before_pause_fct(workqueue, workqueue->priv);
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSED);
			while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			if (workqueue->worker_after_resume_fct)
				workqueue->worker_after_resume_fct(workqueue, workqueue->priv);
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &workqueue->cbs_head, &workqueue->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			if (workqueue->grace_period_fct)
				workqueue->grace_period_fct(workqueue, workqueue->priv);
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&workqueue->qlen, cbcount);
		}
		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_STOP)
			break;
		if (workqueue->worker_before_wait_fct)
			workqueue->worker_before_wait_fct(workqueue, workqueue->priv);
		if (!rt) {
			if (cds_wfcq_empty(&workqueue->cbs_head,
					&workqueue->cbs_tail)) {
				futex_wait(&workqueue->futex);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&workqueue->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		if (workqueue->worker_after_wake_up_fct)
			workqueue->worker_after_wake_up_fct(workqueue, workqueue->priv);
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&workqueue->futex, 0);
	}
	if (workqueue->finalize_worker_fct)
		workqueue->finalize_worker_fct(workqueue, workqueue->priv);
	return NULL;
}
struct urcu_workqueue *urcu_workqueue_create(unsigned long flags,
		int cpu_affinity, void *priv,
		void (*grace_period_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*initialize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*finalize_worker_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_wait_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_wake_up_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_before_pause_fct)(struct urcu_workqueue *workqueue, void *priv),
		void (*worker_after_resume_fct)(struct urcu_workqueue *workqueue, void *priv))
{
	struct urcu_workqueue *workqueue;
	int ret;

	workqueue = malloc(sizeof(*workqueue));
	if (workqueue == NULL)
		urcu_die(errno);
	memset(workqueue, '\0', sizeof(*workqueue));
	cds_wfcq_init(&workqueue->cbs_head, &workqueue->cbs_tail);
	workqueue->qlen = 0;
	workqueue->futex = 0;
	workqueue->flags = flags;
	workqueue->priv = priv;
	workqueue->grace_period_fct = grace_period_fct;
	workqueue->initialize_worker_fct = initialize_worker_fct;
	workqueue->finalize_worker_fct = finalize_worker_fct;
	workqueue->worker_before_wait_fct = worker_before_wait_fct;
	workqueue->worker_after_wake_up_fct = worker_after_wake_up_fct;
	workqueue->worker_before_pause_fct = worker_before_pause_fct;
	workqueue->worker_after_resume_fct = worker_after_resume_fct;
	workqueue->cpu_affinity = cpu_affinity;
	workqueue->loop_count = 0;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret)
		urcu_die(ret);
	return workqueue;
}
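/*
 * Example (sketch): creating a minimal workqueue with no CPU pinning
 * (cpu_affinity == -1) and no worker-lifetime hooks. "my_grace_period"
 * is a hypothetical caller-provided function; which grace period it
 * waits for depends on the urcu flavor the caller uses.
 *
 *	static void my_grace_period(struct urcu_workqueue *wq, void *priv)
 *	{
 *		synchronize_rcu();
 *	}
 *
 *	struct urcu_workqueue *wq = urcu_workqueue_create(0, -1, NULL,
 *		my_grace_period, NULL, NULL, NULL, NULL, NULL, NULL);
 */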
static void wake_worker_thread(struct urcu_workqueue *workqueue)
{
	if (!(_CMM_LOAD_SHARED(workqueue->flags) & URCU_WORKQUEUE_RT))
		futex_wake_up(&workqueue->futex);
}
static int urcu_workqueue_destroy_worker(struct urcu_workqueue *workqueue)
{
	int ret;
	void *retval;

	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_STOP);
	wake_worker_thread(workqueue);

	ret = pthread_join(workqueue->tid, &retval);
	if (ret) {
		urcu_die(ret);
	}
	if (retval != NULL) {
		urcu_die(EINVAL);
	}
	workqueue->flags &= ~URCU_WORKQUEUE_STOP;
	return 0;
}
void urcu_workqueue_destroy(struct urcu_workqueue *workqueue)
{
	if (workqueue == NULL) {
		return;
	}
	if (urcu_workqueue_destroy_worker(workqueue)) {
		urcu_die(errno);
	}
	assert(cds_wfcq_empty(&workqueue->cbs_head, &workqueue->cbs_tail));
	free(workqueue);
}
void urcu_workqueue_queue_work(struct urcu_workqueue *workqueue,
		struct urcu_work *work,
		void (*func)(struct urcu_work *work))
{
	cds_wfcq_node_init(&work->next);
	work->func = func;
	cds_wfcq_enqueue(&workqueue->cbs_head, &workqueue->cbs_tail, &work->next);
	uatomic_inc(&workqueue->qlen);
	wake_worker_thread(workqueue);
}
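/*
 * Example (sketch): struct urcu_work is meant to be embedded in a
 * caller-owned structure; the worker thread passes it back to "func",
 * which recovers the enclosing object with caa_container_of().
 * "struct my_item" and "my_item_free" are hypothetical.
 *
 *	struct my_item {
 *		struct urcu_work work;
 *		int payload;
 *	};
 *
 *	static void my_item_free(struct urcu_work *work)
 *	{
 *		struct my_item *item =
 *			caa_container_of(work, struct my_item, work);
 *
 *		free(item);
 *	}
 *
 *	urcu_workqueue_queue_work(wq, &item->work, my_item_free);
 */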
static
void free_completion(struct urcu_ref *ref)
{
	struct urcu_workqueue_completion *completion;

	completion = caa_container_of(ref, struct urcu_workqueue_completion, ref);
	free(completion);
}
static
void _urcu_workqueue_wait_complete(struct urcu_work *work)
{
	struct urcu_workqueue_completion_work *completion_work;
	struct urcu_workqueue_completion *completion;

	completion_work = caa_container_of(work, struct urcu_workqueue_completion_work, work);
	completion = completion_work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		futex_wake_up(&completion->futex);
	urcu_ref_put(&completion->ref, free_completion);
	free(completion_work);
}
struct urcu_workqueue_completion *urcu_workqueue_create_completion(void)
{
	struct urcu_workqueue_completion *completion;

	completion = calloc(1, sizeof(*completion));
	if (!completion)
		urcu_die(errno);
	urcu_ref_set(&completion->ref, 1);
	completion->barrier_count = 0;
	return completion;
}
void urcu_workqueue_destroy_completion(struct urcu_workqueue_completion *completion)
{
	urcu_ref_put(&completion->ref, free_completion);
}
void urcu_workqueue_wait_completion(struct urcu_workqueue_completion *completion)
{
	/* Wait for the completion barrier count to reach zero. */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		futex_wait(&completion->futex);
	}
}
void urcu_workqueue_queue_completion(struct urcu_workqueue *workqueue,
		struct urcu_workqueue_completion *completion)
{
	struct urcu_workqueue_completion_work *work;

	work = calloc(1, sizeof(*work));
	if (!work)
		urcu_die(errno);
	work->completion = completion;
	urcu_ref_get(&completion->ref);
	uatomic_inc(&completion->barrier_count);
	urcu_workqueue_queue_work(workqueue, &work->work, _urcu_workqueue_wait_complete);
}
/*
 * Wait for all in-flight work to complete execution.
 */
void urcu_workqueue_flush_queued_work(struct urcu_workqueue *workqueue)
{
	struct urcu_workqueue_completion *completion;

	completion = urcu_workqueue_create_completion();
	if (!completion)
		urcu_die(ENOMEM);
	urcu_workqueue_queue_completion(workqueue, completion);
	urcu_workqueue_wait_completion(completion);
	urcu_workqueue_destroy_completion(completion);
}
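/*
 * Note: flushing works by enqueueing a completion behind the work
 * already present in the FIFO queue, so every work item queued before
 * this call has executed once the wait returns. Work queued
 * concurrently with the flush may or may not be waited for.
 */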
/* To be used in before fork handler. */
void urcu_workqueue_pause_worker(struct urcu_workqueue *workqueue)
{
	uatomic_or(&workqueue->flags, URCU_WORKQUEUE_PAUSE);
	cmm_smp_mb__after_uatomic_or();
	wake_worker_thread(workqueue);

	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) == 0)
		(void) poll(NULL, 0, 1);
}
/* To be used in after fork parent handler. */
void urcu_workqueue_resume_worker(struct urcu_workqueue *workqueue)
{
	uatomic_and(&workqueue->flags, ~URCU_WORKQUEUE_PAUSE);
	while ((uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_PAUSED) != 0)
		(void) poll(NULL, 0, 1);
}
void urcu_workqueue_create_worker(struct urcu_workqueue *workqueue)
{
	int ret;

	/* Clear workqueue state from parent. */
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSED;
	workqueue->flags &= ~URCU_WORKQUEUE_PAUSE;
	ret = pthread_create(&workqueue->tid, NULL, workqueue_thread, workqueue);
	if (ret)
		urcu_die(ret);
}