Update Gnulib to the latest git version
deliverable/binutils-gdb.git: gnulib/import/windows-rwlock.c
/* Read-write locks (native Windows implementation).
   Copyright (C) 2005-2019 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2005.
   Based on GCC's gthr-win32.h.  */

#include <config.h>

/* Specification.  */
#include "windows-rwlock.h"

#include <errno.h>
#include <stdlib.h>

/* In this file, the waitqueues are implemented as circular arrays.  */
#define glwthread_waitqueue_t glwthread_carray_waitqueue_t

static void
glwthread_waitqueue_init (glwthread_waitqueue_t *wq)
{
  wq->array = NULL;
  wq->count = 0;
  wq->alloc = 0;
  wq->offset = 0;
}
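
/* Layout of a wait queue (the glwthread_carray_waitqueue_t type comes from
   the included windows-rwlock.h): ARRAY points to ALLOC slots, of which
   COUNT hold event handles of waiting threads.  The used slots start at
   index OFFSET and wrap around to the beginning of the array when they
   reach ALLOC, i.e. the logical element i lives at (OFFSET + i) % ALLOC.  */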

/* Enqueues the current thread, represented by an event, in a wait queue.
   Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
static HANDLE
glwthread_waitqueue_add (glwthread_waitqueue_t *wq)
{
  HANDLE event;
  unsigned int index;

  if (wq->count == wq->alloc)
    {
      unsigned int new_alloc = 2 * wq->alloc + 1;
      HANDLE *new_array =
        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
      if (new_array == NULL)
        /* No more memory.  */
        return INVALID_HANDLE_VALUE;
      /* Now is a good opportunity to rotate the array so that its contents
         starts at offset 0.  */
      if (wq->offset > 0)
        {
          unsigned int old_count = wq->count;
          unsigned int old_alloc = wq->alloc;
          unsigned int old_offset = wq->offset;
          unsigned int i;
          if (old_offset + old_count > old_alloc)
            {
              unsigned int limit = old_offset + old_count - old_alloc;
              for (i = 0; i < limit; i++)
                new_array[old_alloc + i] = new_array[i];
            }
          for (i = 0; i < old_count; i++)
            new_array[i] = new_array[old_offset + i];
          wq->offset = 0;
        }
      wq->array = new_array;
      wq->alloc = new_alloc;
    }
  /* Whether the created event is a manual-reset one or an auto-reset one
     does not matter, since we will wait on it only once.  */
  event = CreateEvent (NULL, TRUE, FALSE, NULL);
  if (event == INVALID_HANDLE_VALUE)
    /* No way to allocate an event.  */
    return INVALID_HANDLE_VALUE;
  index = wq->offset + wq->count;
  if (index >= wq->alloc)
    index -= wq->alloc;
  wq->array[index] = event;
  wq->count++;
  return event;
}
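
/* Worked example of the rotation above (illustrative numbers): with
   alloc = 4, offset = 3 and count = 3, the queued events occupy logical
   positions 0, 1, 2, stored at physical indices 3, 0, 1.  After realloc
   grows the array to 2 * 4 + 1 = 9 slots, the wrapped elements at indices
   0 and 1 are first copied to indices 4 and 5, and then the contiguous run
   at indices 3..5 is copied down to indices 0..2, leaving offset = 0.  */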

/* Notifies the first thread from a wait queue and dequeues it.  */
static void
glwthread_waitqueue_notify_first (glwthread_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}

/* Notifies all threads from a wait queue and dequeues them all.  */
static void
glwthread_waitqueue_notify_all (glwthread_waitqueue_t *wq)
{
  unsigned int i;

  for (i = 0; i < wq->count; i++)
    {
      unsigned int index = wq->offset + i;
      if (index >= wq->alloc)
        index -= wq->alloc;
      SetEvent (wq->array[index]);
    }
  wq->count = 0;
  wq->offset = 0;
}

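/* The runcount field of a glwthread_rwlock_t encodes the lock state, as can
   be seen from the functions below: 0 means the lock is free, a positive
   value is the number of readers currently holding it, and -1 means a
   single writer holds it.  */
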
void
glwthread_rwlock_init (glwthread_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  glwthread_waitqueue_init (&lock->waiting_readers);
  glwthread_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  lock->guard.done = 1;
}

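/* Each of the locking functions below starts with the same lazy
   initialization sequence, which handles locks whose guard.done field is
   still 0 (presumably locks created through a static initializer in
   windows-rwlock.h rather than through glwthread_rwlock_init; that is an
   assumption about the header, not something verified here).  The one
   thread for which InterlockedIncrement (&lock->guard.started) returns 0
   performs the initialization; every other thread undoes its increment and
   spins with Sleep (0) until guard.done becomes nonzero.  */
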
int
glwthread_rwlock_rdlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around.  */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock.  */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = glwthread_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glwthread_rwlock_wrlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around.  */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock.  */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = glwthread_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glwthread_rwlock_tryrdlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around.  */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock.  */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  /* It's OK to wait for this critical section, because it is never taken for a
     long time.  */
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
    {
      /* This thread would have to wait for a while.  Return instead.  */
      LeaveCriticalSection (&lock->lock);
      return EBUSY;
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glwthread_rwlock_trywrlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around.  */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock.  */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  /* It's OK to wait for this critical section, because it is never taken for a
     long time.  */
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread would have to wait for a while.  Return instead.  */
      LeaveCriticalSection (&lock->lock);
      return EBUSY;
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

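/* Note on the hand-off protocol: glwthread_rwlock_unlock adjusts runcount on
   behalf of the threads it is about to wake (setting it to -1 for a writer,
   or adding the number of waiting readers) before signalling their events.
   This is why the awakened threads in glwthread_rwlock_rdlock and
   glwthread_rwlock_wrlock can merely assert the expected runcount value
   instead of re-evaluating the locking condition.  */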
int
glwthread_rwlock_unlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  */
          lock->runcount--;
          glwthread_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  */
          lock->runcount += lock->waiting_readers.count;
          glwthread_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glwthread_rwlock_destroy (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  if (lock->runcount != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  if (lock->waiting_readers.array != NULL)
    free (lock->waiting_readers.array);
  if (lock->waiting_writers.array != NULL)
    free (lock->waiting_writers.array);
  lock->guard.done = 0;
  return 0;
}
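
/* Example usage (a minimal sketch, not part of this module; the demo_*
   names are made up for illustration).  It shows the intended calling
   sequence against the functions defined above, for a lock that is
   explicitly initialized rather than statically initialized.  */
#if 0
#include "windows-rwlock.h"
#include <stdio.h>

static glwthread_rwlock_t demo_lock;
static int demo_shared_value;

static void
demo_reader (void)
{
  /* Any number of readers may hold the lock at the same time.  */
  glwthread_rwlock_rdlock (&demo_lock);
  printf ("read %d\n", demo_shared_value);
  glwthread_rwlock_unlock (&demo_lock);
}

static void
demo_writer (void)
{
  /* A writer gets exclusive access; runcount is -1 while it runs.  */
  glwthread_rwlock_wrlock (&demo_lock);
  demo_shared_value++;
  glwthread_rwlock_unlock (&demo_lock);
}

static void
demo (void)
{
  glwthread_rwlock_init (&demo_lock);
  demo_reader ();
  demo_writer ();
  /* Destroy only when no thread holds the lock any more.  */
  glwthread_rwlock_destroy (&demo_lock);
}
#endif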