/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Transactional Memory Library (libitm).

   Libitm is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libitm_i.h"
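
// Overview of the synchronization scheme implemented below: the "summary"
// word tracks writer state (active writer, waiting writers, waiting
// readers), while each thread publishes its reader status in its own
// shared_state slot, where a value of -1 means "not an active reader".
// read_lock() and read_unlock() first try a mutex-free fast path that only
// needs a seq_cst fence; the pthread mutex and condition variables are used
// only on the contended slow paths.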

namespace GTM HIDDEN {

// Initialize a new RW lock.
// ??? Move this back to the header file when constexpr is implemented.

gtm_rwlock::gtm_rwlock()
  : mutex (PTHREAD_MUTEX_INITIALIZER),
    c_readers (PTHREAD_COND_INITIALIZER),
    c_writers (PTHREAD_COND_INITIALIZER),
    c_confirmed_writers (PTHREAD_COND_INITIALIZER),
    summary (0),
    a_readers (0),
    w_readers (0),
    w_writers (0)
{ }

gtm_rwlock::~gtm_rwlock()
{
  pthread_mutex_destroy (&this->mutex);
  pthread_cond_destroy (&this->c_readers);
  pthread_cond_destroy (&this->c_writers);
  // For symmetry with the constructor, also destroy the condvar on which
  // writers wait for readers to confirm.
  pthread_cond_destroy (&this->c_confirmed_writers);
}

// Acquire a RW lock for reading.

void
gtm_rwlock::read_lock (gtm_thread *tx)
{
  // Fast path: first announce our intent to read, then check for conflicting
  // intents to write.  The fence ensures that this happens in exactly this
  // order.
  tx->shared_state.store (0, memory_order_relaxed);
  atomic_thread_fence (memory_order_seq_cst);
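  // (This fence pairs with the seq_cst fence in write_lock_generic(): either
  // we see the writer's intent in summary below, or the writer sees our
  // announcement in shared_state, a Dekker-style handshake.)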
  unsigned int sum = this->summary.load (memory_order_relaxed);
  if (likely(!(sum & (a_writer | w_writer))))
    return;

  // There seems to be an active, waiting, or confirmed writer, so enter the
  // mutex-based slow path.  To try to keep the number of readers that the
  // writer will see small, we clear our read flag right away before entering
  // the critical section.  Otherwise, the writer would have to wait for us to
  // get into the critical section.  (Note that for correctness, this only has
  // to happen before we leave the slow path and before we wait for any
  // writer.)
  // ??? Add a barrier to enforce early visibility of this?
  tx->shared_state.store(-1, memory_order_relaxed);
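  // (A shared_state of -1 is what write_lock_generic() treats as "not an
  // active reader", so a writer scanning the thread list will not wait for
  // us while we are blocked on the mutex.)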

  pthread_mutex_lock (&this->mutex);

  // Read summary again after acquiring the mutex because it might have
  // changed while we were waiting for the mutex to become free.
  sum = this->summary.load (memory_order_relaxed);

  // If there is a writer waiting for readers, wake it up.  Only do that if we
  // might be the last reader that could do the wake-up; otherwise, skip the
  // wake-up but decrease a_readers to show that we have entered the slow path.
  // This has to happen before we wait for any writers or upgraders.
  // See write_lock_generic() for further explanations.
  if (this->a_readers > 0)
    {
      this->a_readers--;
      if (this->a_readers == 0)
        pthread_cond_signal(&this->c_confirmed_writers);
    }

  // If there is an active or waiting writer, we must wait.
  while (sum & (a_writer | w_writer))
    {
      this->summary.store (sum | w_reader, memory_order_relaxed);
      this->w_readers++;
      pthread_cond_wait (&this->c_readers, &this->mutex);
      sum = this->summary.load (memory_order_relaxed);
      if (--this->w_readers == 0)
        sum &= ~w_reader;
    }

  // Otherwise we can acquire the lock for read.
  tx->shared_state.store(0, memory_order_relaxed);

  pthread_mutex_unlock(&this->mutex);
}


// Acquire a RW lock for writing.  Generic version that also works for
// upgrades.
// Note that an upgrade might fail (and thus waste previous work done during
// this transaction) if there is another thread that tried to go into serial
// mode earlier (i.e., upgrades do not have higher priority than pure writers).
// However, this seems rare enough not to be considered further, as it
// requires a non-upgrade writer and an upgrading writer to happen to switch
// to serial mode concurrently.  If we wanted to handle this, a writer waiting
// for readers would have to coordinate with later-arriving upgrades and hand
// over the lock to them, including the reader-waiting state.  We can try to
// support this if it actually happens often enough in real workloads.

bool
gtm_rwlock::write_lock_generic (gtm_thread *tx)
{
  pthread_mutex_lock (&this->mutex);

  unsigned int sum = this->summary.load (memory_order_relaxed);

  // If there is an active writer, wait.
  while (sum & a_writer)
    {
      if (tx != 0)
        {
          // If this is an upgrade, we must not wait for other writers or
          // upgrades that have already gone in.
          pthread_mutex_unlock (&this->mutex);
          return false;
        }

      this->summary.store (sum | w_writer, memory_order_relaxed);
      this->w_writers++;
      pthread_cond_wait (&this->c_writers, &this->mutex);
      sum = this->summary.load (memory_order_relaxed);
      if (--this->w_writers == 0)
        sum &= ~w_writer;
    }

  // Otherwise we can acquire the lock for write.  As a writer, we have
  // priority, so we don't need to take this back.
  this->summary.store (sum | a_writer, memory_order_relaxed);
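  // (The relaxed store suffices: summary is only ever modified with the
  // mutex held, and the seq_cst fence below orders this store against the
  // subsequent scan of the readers' shared_state flags.)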

  // We still need to wait for active readers to finish.  The barrier makes
  // sure that we first set our write intent and check for active readers
  // after that, in strictly this order (similar to the barrier in the fast
  // path of read_lock()).
  atomic_thread_fence(memory_order_seq_cst);

  // Count the number of active readers to be able to decrease the number of
  // wake-ups and wait calls that are necessary.
  //
  // This number is an upper bound of the number of readers that actually
  // are still active and which we need to wait for:
  // - We set our write flag before checking the reader flags, and readers
  //   check our write flag after clearing their read flags in read_unlock().
  //   Therefore, they will enter the slow path whenever we have seen them.
  // - Readers will have cleared their read flags before leaving the slow
  //   path in read_lock() (prevents lost wake-ups), and before waiting for
  //   any writer (prevents deadlocks).
  //
  // However, this number is also just a lower bound of the number of readers
  // that will actually enter the slow path in read_unlock() or read_lock():
  // - Because the read flag is cleared outside of a critical section, writers
  //   can see it as cleared while the reader still goes into the slow path.
  //
  // Therefore, readers can skip (lower bound - 1) wake-ups, but we do need
  // the following loop to check that the readers that we wanted to wait for
  // are actually those that entered the slow path so far (and either skipped
  // or sent a wake-up).
  //
  // ??? Do we need to optimize further?  (The writer could publish a list of
  // readers that it suspects to be active.  Readers could check this list and
  // only decrement a_readers if they are in this list.)
  for (;;)
    {
      // ??? Keep a list of active readers that we saw and update it on the
      // next retry instead?  This might reduce the number of cache misses
      // that we get when checking reader flags.
      int readers = 0;
      for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
           it = it->next_thread)
        {
          // Don't count ourself if this is an upgrade.
          if (it == tx)
            continue;
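          // Any thread whose shared_state is not -1 might still be an
          // active reader (see read_lock()), so we have to wait for it.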
          if (it->shared_state.load(memory_order_relaxed) != (gtm_word)-1)
            readers++;
        }

      // If we have not seen any readers, we will not wait.
      if (readers == 0)
        break;

      // We've seen a number of readers, so we publish this number and wait.
      this->a_readers = readers;
      pthread_cond_wait (&this->c_confirmed_writers, &this->mutex);
    }

  pthread_mutex_unlock (&this->mutex);
  return true;
}

// Acquire a RW lock for writing.

void
gtm_rwlock::write_lock ()
{
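  // A null tx marks this as a non-upgrade acquisition: write_lock_generic()
  // then blocks until the lock is acquired instead of failing.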
  write_lock_generic (0);
}


// Upgrade a RW lock that has been locked for reading to a writing lock.
// Do this without allowing another writer to get in between.  Return false
// if this attempt fails (i.e., another thread also upgraded).

bool
gtm_rwlock::write_upgrade (gtm_thread *tx)
{
  return write_lock_generic (tx);
}


// Must be called if and only if the previous upgrade was successful, and
// only once it is safe for the transaction to no longer be marked as a
// reader.

void
gtm_rwlock::write_upgrade_finish (gtm_thread *tx)
{
  // We are not a reader anymore.  This is only safe to do after we have
  // acquired the writer lock.
  tx->shared_state.store (-1, memory_order_release);
}


// Release a RW lock from reading.

void
gtm_rwlock::read_unlock (gtm_thread *tx)
{
  // We only need release memory order here because of privatization safety
  // (this ensures that marking the transaction as inactive happens after
  // any prior data accesses by this transaction, and that neither the
  // compiler nor the hardware orders this store earlier).
  // ??? We might be able to avoid this release here if the compiler can't
  // merge the release fence with the subsequent seq_cst fence.
  tx->shared_state.store (-1, memory_order_release);
  // We need this seq_cst fence here to avoid lost wake-ups.  Furthermore,
  // the privatization safety implementation in gtm_thread::try_commit()
  // relies on the existence of this seq_cst fence.
  atomic_thread_fence (memory_order_seq_cst);
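  // (As in read_lock()'s fast path, this fence pairs with the fence in
  // write_lock_generic(): if a writer still counted us as an active reader,
  // its write intent will be visible in the summary load below, so we take
  // the slow path and deliver the wake-up it is waiting for.)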
  unsigned int sum = this->summary.load (memory_order_relaxed);
  if (likely(!(sum & (a_writer | w_writer))))
    return;

  // There is a writer, either active or waiting for other readers or writers.
  // Thus, enter the mutex-based slow path.
  pthread_mutex_lock (&this->mutex);

  // If there is a writer waiting for readers, wake it up.  Only do that if we
  // might be the last reader that could do the wake-up; otherwise, skip the
  // wake-up and decrease a_readers to publish that we have entered the slow
  // path but skipped the wake-up.
  if (this->a_readers > 0)
    {
      this->a_readers--;
      if (this->a_readers == 0)
        pthread_cond_signal(&this->c_confirmed_writers);
    }

  // We don't need to wake up any writers waiting for other writers.  Active
  // writers will take care of that.

  pthread_mutex_unlock (&this->mutex);
}


// Release a RW lock from writing.

void
gtm_rwlock::write_unlock ()
{
  pthread_mutex_lock (&this->mutex);

  unsigned int sum = this->summary.load (memory_order_relaxed);
  this->summary.store (sum & ~a_writer, memory_order_relaxed);

  // If there is a waiting writer, wake it.
  if (unlikely (sum & w_writer))
    pthread_cond_signal (&this->c_writers);

  // If there are waiting readers, wake them.
  else if (unlikely (sum & w_reader))
    pthread_cond_broadcast (&this->c_readers);

  pthread_mutex_unlock (&this->mutex);
}

} // namespace GTM