/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#include "utilities/singleWriterSynchronizer.hpp"

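// Usage sketch (illustrative only; the read-side enter() and exit() are
// declared in utilities/singleWriterSynchronizer.hpp, not in this file):
//
//   Reader threads:
//     uint token = sync.enter();   // join a critical section
//     ... read the shared state ...
//     sync.exit(token);            // leave; may signal a waiting writer
//
//   Single writer thread:
//     ... detach old state so new readers cannot reach it ...
//     sync.synchronize();          // wait for pre-existing readers to exit
//     ... safely reclaim the detached state ...
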
SingleWriterSynchronizer::SingleWriterSynchronizer() :
  _enter(0),
  _exit(),
  // The initial value of 1 for _waiting_for puts it on the inactive
  // track, so no thread exiting a critical section will match it.
  _waiting_for(1),
  _wakeup()
  DEBUG_ONLY(COMMA _writers(0))
{}

// Wait until all threads that entered a critical section before
// synchronization have exited that critical section.
void SingleWriterSynchronizer::synchronize() {
  // Side-effect in assert balanced by debug-only dec at end.
  assert(Atomic::add(1u, &_writers) == 1u, "multiple writers");
  // We don't know anything about the muxing between this invocation
  // and invocations in other threads. We must start with the latest
  // _enter polarity, else we could clobber the wrong _exit value on
  // the first iteration. So fence to ensure everything here follows
  // whatever muxing was used.
  OrderAccess::fence();
  uint value = _enter;
  // (1) Determine the old and new exit counters, based on the
  // polarity (bit0 value) of the on-entry enter counter.
  volatile uint* new_ptr = &_exit[(value + 1) & 1];
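  // Worked example (illustration only): if _enter were 6 here, bit0 is 0,
  // so new_ptr selects _exit[1] and the old counter is _exit[0]; the
  // cmpxchg below then flips _enter to 7, changing the polarity that
  // subsequent enter() calls observe.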
  // (2) Change the in-use exit counter to the new counter, by adding
  // 1 to the enter counter (flipping the polarity), meanwhile
  // "simultaneously" initializing the new exit counter to that enter
  // value. Note: The new exit counter is not being used by read
  // operations until this change of _enter succeeds.
  uint old;
  do {
    old = value;
    *new_ptr = ++value;
    value = Atomic::cmpxchg(value, &_enter, old);
  } while (old != value);
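  // (Note: Atomic::cmpxchg returns the value it observed in _enter, so if
  // a racing enter() advanced the counter, the loop retries with that newer
  // value and re-initializes the not-yet-published new exit counter.)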
  // Critical sections entered before we changed the polarity will use
  // the old exit counter. Critical sections entered after the change
  // will use the new exit counter.
  volatile uint* old_ptr = &_exit[old & 1];
  assert(old_ptr != new_ptr, "invariant");
  // (3) Inform threads in in-progress critical sections that there is
  // a pending synchronize waiting. The thread that completes the
  // request (_exit value == old) will signal the _wakeup semaphore to
  // allow us to proceed.
  _waiting_for = old;
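  // (For reference, the read-side exit() declared in
  // singleWriterSynchronizer.hpp increments _exit[enter_value & 1] and
  // signals _wakeup when the result matches _waiting_for; that signal is
  // what the wait below is for.)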
  // Write of _waiting_for must precede read of _exit and associated
  // conditional semaphore wait. If they were re-ordered then a
  // critical section exit could miss the wakeup request, failing to
  // signal us while we're waiting.
  OrderAccess::fence();
  // (4) Wait for all the critical sections started before the change
  // to complete, i.e. for the value of old_ptr to catch up with old.
  // Loop because there could be pending wakeups unrelated to this
  // synchronize request.
  while (old != OrderAccess::load_acquire(old_ptr)) {
    _wakeup.wait();
  }
  // (5) Drain any pending wakeups. A critical section exit may have
  // completed our request and seen our _waiting_for before we checked
  // for completion. There are also possible (though rare) spurious
  // wakeup signals in the timing gap between changing the _enter
  // polarity and setting _waiting_for. Enough of any of those could
  // lead to semaphore overflow. This doesn't guarantee no unrelated
  // wakeups for the next wait, but prevents unbounded accumulation.
  while (_wakeup.trywait()) {}
  DEBUG_ONLY(Atomic::dec(&_writers);)
}