Update Gnulib to the latest git version
[deliverable/binutils-gdb.git] / gnulib / import / glthread / lock.c
CommitLineData
5abebf3c 1/* Locking in multithreaded situations.
c0c3707f 2 Copyright (C) 2005-2019 Free Software Foundation, Inc.
5abebf3c
CB
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 3, or (at your option)
7 any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
c0c3707f 15 along with this program; if not, see <https://www.gnu.org/licenses/>. */
5abebf3c
CB
16
17/* Written by Bruno Haible <bruno@clisp.org>, 2005.
c0c3707f 18 Based on GCC's gthr-posix.h, gthr-posix95.h. */
5abebf3c
CB
19
20#include <config.h>
21
22#include "glthread/lock.h"
23
24/* ========================================================================= */
25
c0c3707f
CB
26#if USE_ISOC_THREADS || USE_ISOC_AND_POSIX_THREADS
27
28/* -------------------------- gl_lock_t datatype -------------------------- */
29
30int
31glthread_lock_init (gl_lock_t *lock)
32{
33 if (mtx_init (&lock->mutex, mtx_plain) != thrd_success)
34 return ENOMEM;
35 lock->init_needed = 0;
36 return 0;
37}
38
39int
40glthread_lock_lock (gl_lock_t *lock)
41{
42 if (lock->init_needed)
43 call_once (&lock->init_once, lock->init_func);
44 if (mtx_lock (&lock->mutex) != thrd_success)
45 return EAGAIN;
46 return 0;
47}
48
49int
50glthread_lock_unlock (gl_lock_t *lock)
51{
52 if (lock->init_needed)
53 call_once (&lock->init_once, lock->init_func);
54 if (mtx_unlock (&lock->mutex) != thrd_success)
55 return EINVAL;
56 return 0;
57}
58
/* Destroy LOCK.  Always succeeds: mtx_destroy has no failure mode.  */
int
glthread_lock_destroy (gl_lock_t *lock)
{
  /* Make sure the lazy initializer has run, so a statically allocated
     lock is in a defined state before mtx_destroy is applied.  */
  if (lock->init_needed)
    call_once (&lock->init_once, lock->init_func);
  mtx_destroy (&lock->mutex);
  return 0;
}
67
68/* ------------------------- gl_rwlock_t datatype ------------------------- */
69
70int
71glthread_rwlock_init (gl_rwlock_t *lock)
72{
73 if (mtx_init (&lock->lock, mtx_plain) != thrd_success
74 || cnd_init (&lock->waiting_readers) != thrd_success
75 || cnd_init (&lock->waiting_writers) != thrd_success)
76 return ENOMEM;
77 lock->waiting_writers_count = 0;
78 lock->runcount = 0;
79 lock->init_needed = 0;
80 return 0;
81}
82
/* Acquire LOCK for reading.  runcount > 0 counts active readers,
   runcount == -1 means a writer holds the lock.  */
int
glthread_rwlock_rdlock (gl_rwlock_t *lock)
{
  if (lock->init_needed)
    call_once (&lock->init_once, lock->init_func);
  if (mtx_lock (&lock->lock) != thrd_success)
    return EAGAIN;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      if (cnd_wait (&lock->waiting_readers, &lock->lock) != thrd_success)
        {
          /* Drop the internal mutex before reporting the failure.  */
          mtx_unlock (&lock->lock);
          return EINVAL;
        }
    }
  /* Register this thread as one more active reader.  */
  lock->runcount++;
  if (mtx_unlock (&lock->lock) != thrd_success)
    return EINVAL;
  return 0;
}
109
/* Acquire LOCK for writing.  Waits until no reader or writer is active,
   then marks the lock write-held by setting runcount to -1.  */
int
glthread_rwlock_wrlock (gl_rwlock_t *lock)
{
  if (lock->init_needed)
    call_once (&lock->init_once, lock->init_func);
  if (mtx_lock (&lock->lock) != thrd_success)
    return EAGAIN;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  The counter is what makes unlock prefer
         writers over readers.  */
      lock->waiting_writers_count++;
      if (cnd_wait (&lock->waiting_writers, &lock->lock) != thrd_success)
        {
          /* Deregister before reporting failure, so the count stays
             consistent.  */
          lock->waiting_writers_count--;
          mtx_unlock (&lock->lock);
          return EINVAL;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  if (mtx_unlock (&lock->lock) != thrd_success)
    return EINVAL;
  return 0;
}
136
/* Release a reader or a writer lock, distinguished by the sign of
   runcount: -1 means one writer holds the lock, > 0 counts readers.  */
int
glthread_rwlock_unlock (gl_rwlock_t *lock)
{
  if (lock->init_needed)
    call_once (&lock->init_once, lock->init_func);
  if (mtx_lock (&lock->lock) != thrd_success)
    return EAGAIN;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          mtx_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  Unlocking an unlocked lock is an error.  */
      if (!(lock->runcount > 0))
        {
          mtx_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* The lock just became free; hand it over.  POSIX recommends that
         "write locks shall take precedence over read locks", to avoid
         "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          if (cnd_signal (&lock->waiting_writers) != thrd_success)
            {
              mtx_unlock (&lock->lock);
              return EINVAL;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          if (cnd_broadcast (&lock->waiting_readers) != thrd_success)
            {
              mtx_unlock (&lock->lock);
              return EINVAL;
            }
        }
    }
  if (mtx_unlock (&lock->lock) != thrd_success)
    return EINVAL;
  return 0;
}
191
/* Destroy LOCK.  Always succeeds: mtx_destroy and cnd_destroy have no
   failure mode.  */
int
glthread_rwlock_destroy (gl_rwlock_t *lock)
{
  /* Make sure the lazy initializer has run, so every member below is in
     a defined state before being destroyed.  */
  if (lock->init_needed)
    call_once (&lock->init_once, lock->init_func);
  mtx_destroy (&lock->lock);
  cnd_destroy (&lock->waiting_readers);
  cnd_destroy (&lock->waiting_writers);
  return 0;
}
202
203/* --------------------- gl_recursive_lock_t datatype --------------------- */
204
205int
206glthread_recursive_lock_init (gl_recursive_lock_t *lock)
207{
208 if (mtx_init (&lock->mutex, mtx_plain | mtx_recursive) != thrd_success)
209 return ENOMEM;
210 lock->init_needed = 0;
211 return 0;
212}
213
214int
215glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
216{
217 if (lock->init_needed)
218 call_once (&lock->init_once, lock->init_func);
219 if (mtx_lock (&lock->mutex) != thrd_success)
220 return EAGAIN;
221 return 0;
222}
223
224int
225glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
226{
227 if (lock->init_needed)
228 call_once (&lock->init_once, lock->init_func);
229 if (mtx_unlock (&lock->mutex) != thrd_success)
230 return EINVAL;
231 return 0;
232}
233
/* Destroy LOCK.  Always succeeds: mtx_destroy has no failure mode.  */
int
glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
{
  /* Make sure the lazy initializer has run, so a statically allocated
     lock is in a defined state before mtx_destroy is applied.  */
  if (lock->init_needed)
    call_once (&lock->init_once, lock->init_func);
  mtx_destroy (&lock->mutex);
  return 0;
}
242
243/* -------------------------- gl_once_t datatype -------------------------- */
244
245#endif
246
247/* ========================================================================= */
248
5abebf3c
CB
249#if USE_POSIX_THREADS
250
251/* -------------------------- gl_lock_t datatype -------------------------- */
252
253/* ------------------------- gl_rwlock_t datatype ------------------------- */
254
c0c3707f
CB
255# if HAVE_PTHREAD_RWLOCK && (HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER || (defined PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP && (__GNU_LIBRARY__ > 1)))
256
257# ifdef PTHREAD_RWLOCK_INITIALIZER
258
259# if !HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER
260 /* glibc with bug https://sourceware.org/bugzilla/show_bug.cgi?id=13701 */
261
262int
263glthread_rwlock_init_for_glibc (pthread_rwlock_t *lock)
264{
265 pthread_rwlockattr_t attributes;
266 int err;
5abebf3c 267
c0c3707f
CB
268 err = pthread_rwlockattr_init (&attributes);
269 if (err != 0)
270 return err;
271 /* Note: PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP is the only value that
272 causes the writer to be preferred. PTHREAD_RWLOCK_PREFER_WRITER_NP does not
273 do this; see
274 http://man7.org/linux/man-pages/man3/pthread_rwlockattr_setkind_np.3.html */
275 err = pthread_rwlockattr_setkind_np (&attributes,
276 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
277 if (err == 0)
278 err = pthread_rwlock_init(lock, &attributes);
279 /* pthread_rwlockattr_destroy always returns 0. It cannot influence the
280 return value. */
281 pthread_rwlockattr_destroy (&attributes);
282 return err;
283}
284
285# endif
286# else
5abebf3c
CB
287
288int
289glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
290{
291 int err;
292
293 err = pthread_rwlock_init (&lock->rwlock, NULL);
294 if (err != 0)
295 return err;
296 lock->initialized = 1;
297 return 0;
298}
299
/* Acquire LOCK for reading, initializing it on first use.  */
int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  /* Lazy initialization, protected by lock->guard: check the flag, take
     the guard mutex, then re-check under the mutex (double-checked
     init).  */
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_rdlock (&lock->rwlock);
}
325
/* Acquire LOCK for writing, initializing it on first use.  */
int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  /* Lazy initialization, protected by lock->guard: check the flag, take
     the guard mutex, then re-check under the mutex (double-checked
     init).  */
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_wrlock (&lock->rwlock);
}
351
352int
353glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
354{
355 if (!lock->initialized)
356 return EINVAL;
357 return pthread_rwlock_unlock (&lock->rwlock);
358}
359
360int
361glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
362{
363 int err;
364
365 if (!lock->initialized)
366 return EINVAL;
367 err = pthread_rwlock_destroy (&lock->rwlock);
368 if (err != 0)
369 return err;
370 lock->initialized = 0;
371 return 0;
372}
373
374# endif
375
376# else
377
378int
379glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
380{
381 int err;
382
383 err = pthread_mutex_init (&lock->lock, NULL);
384 if (err != 0)
385 return err;
386 err = pthread_cond_init (&lock->waiting_readers, NULL);
387 if (err != 0)
388 return err;
389 err = pthread_cond_init (&lock->waiting_writers, NULL);
390 if (err != 0)
391 return err;
392 lock->waiting_writers_count = 0;
393 lock->runcount = 0;
394 return 0;
395}
396
/* Acquire LOCK for reading.  runcount > 0 counts active readers,
   runcount == -1 means a writer holds the lock.  */
int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
      if (err != 0)
        {
          /* Drop the internal mutex before reporting the failure.  */
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
    }
  /* Register this thread as one more active reader.  */
  lock->runcount++;
  return pthread_mutex_unlock (&lock->lock);
}
423
/* Acquire LOCK for writing.  Waits until no reader or writer is active,
   then marks the lock write-held by setting runcount to -1.  */
int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  The counter is what makes unlock prefer
         writers over readers.  */
      lock->waiting_writers_count++;
      err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
      if (err != 0)
        {
          /* Deregister before reporting failure, so the count stays
             consistent.  */
          lock->waiting_writers_count--;
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return pthread_mutex_unlock (&lock->lock);
}
450
/* Release a reader or a writer lock, distinguished by the sign of
   runcount: -1 means one writer holds the lock, > 0 counts readers.  */
int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  Unlocking an unlocked lock is an error.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* The lock just became free; hand it over.  POSIX recommends that
         "write locks shall take precedence over read locks", to avoid
         "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}
506
507int
508glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
509{
510 int err;
511
512 err = pthread_mutex_destroy (&lock->lock);
513 if (err != 0)
514 return err;
515 err = pthread_cond_destroy (&lock->waiting_readers);
516 if (err != 0)
517 return err;
518 err = pthread_cond_destroy (&lock->waiting_writers);
519 if (err != 0)
520 return err;
521 return 0;
522}
523
524# endif
525
526/* --------------------- gl_recursive_lock_t datatype --------------------- */
527
528# if HAVE_PTHREAD_MUTEX_RECURSIVE
529
530# if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
531
532int
533glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
534{
535 pthread_mutexattr_t attributes;
536 int err;
537
538 err = pthread_mutexattr_init (&attributes);
539 if (err != 0)
540 return err;
541 err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
542 if (err != 0)
543 {
544 pthread_mutexattr_destroy (&attributes);
545 return err;
546 }
547 err = pthread_mutex_init (lock, &attributes);
548 if (err != 0)
549 {
550 pthread_mutexattr_destroy (&attributes);
551 return err;
552 }
553 err = pthread_mutexattr_destroy (&attributes);
554 if (err != 0)
555 return err;
556 return 0;
557}
558
559# else
560
/* Initialize LOCK with a recursive mutex and mark it initialized.  */
int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      /* Release the attribute object before reporting the failure.  */
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (&lock->recmutex, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  /* The attribute object is no longer needed once the mutex exists.  */
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}
588
/* Acquire LOCK, initializing it on first use.  */
int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  /* Lazy initialization, protected by lock->guard: check the flag, take
     the guard mutex, then re-check under the mutex (double-checked
     init).  */
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_recursive_lock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_mutex_lock (&lock->recmutex);
}
614
615int
616glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
617{
618 if (!lock->initialized)
619 return EINVAL;
620 return pthread_mutex_unlock (&lock->recmutex);
621}
622
623int
624glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
625{
626 int err;
627
628 if (!lock->initialized)
629 return EINVAL;
630 err = pthread_mutex_destroy (&lock->recmutex);
631 if (err != 0)
632 return err;
633 lock->initialized = 0;
634 return 0;
635}
636
637# endif
638
639# else
640
641int
642glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
643{
644 int err;
645
646 err = pthread_mutex_init (&lock->mutex, NULL);
647 if (err != 0)
648 return err;
649 lock->owner = (pthread_t) 0;
650 lock->depth = 0;
651 return 0;
652}
653
/* Acquire LOCK recursively.  The underlying mutex is taken only for the
   outermost lock by a given thread; nested locks just bump depth.  */
int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  if (lock->owner != self)
    {
      int err;

      err = pthread_mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      /* Record ownership only after the mutex is actually held.  */
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      /* Undo the increment and report failure rather than losing count.  */
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}
674
675int
676glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
677{
678 if (lock->owner != pthread_self ())
679 return EPERM;
680 if (lock->depth == 0)
681 return EINVAL;
682 if (--(lock->depth) == 0)
683 {
684 lock->owner = (pthread_t) 0;
685 return pthread_mutex_unlock (&lock->mutex);
686 }
687 else
688 return 0;
689}
690
691int
692glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
693{
694 if (lock->owner != (pthread_t) 0)
695 return EBUSY;
696 return pthread_mutex_destroy (&lock->mutex);
697}
698
699# endif
700
701/* -------------------------- gl_once_t datatype -------------------------- */
702
/* A pristine once-control object, kept as a reference bit pattern.  */
static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

/* Return 1 the first time it is called on *ONCE_CONTROL, 0 thereafter.
   Works without knowing the representation of pthread_once_t.  */
int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a
     floating-point type, a pointer type, or a structure type; compare
     only its first byte against the pristine pattern, and flip that
     byte to mark "already run".  */
  char *p = (char *) once_control;
  char fresh_byte = *(const char *) &fresh_once;

  if (*p != fresh_byte)
    return 0;
  *p = ~fresh_byte;
  return 1;
}
720
721#endif
722
723/* ========================================================================= */
724
5abebf3c
CB
725#if USE_WINDOWS_THREADS
726
5abebf3c
CB
727#endif
728
729/* ========================================================================= */
This page took 0.060206 seconds and 4 git commands to generate.