/*-------------------------------------------------------------------------
 *
 * lwlock.h
 *	  Lightweight lock manager
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/lwlock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef LWLOCK_H
#define LWLOCK_H

#ifdef FRONTEND
#error "lwlock.h may not be included from frontend code"
#endif

21 | #include "storage/proclist_types.h" |
22 | #include "storage/s_lock.h" |
23 | #include "port/atomics.h" |

struct PGPROC;

/*
 * Code outside of lwlock.c should not manipulate the contents of this
 * structure directly, but we have to declare it here to allow LWLocks to be
 * incorporated into other data structures.
 */
typedef struct LWLock
{
	uint16		tranche;		/* tranche ID */
	pg_atomic_uint32 state;		/* state of exclusive/nonexclusive lockers */
	proclist_head waiters;		/* list of waiting PGPROCs */
#ifdef LOCK_DEBUG
	pg_atomic_uint32 nwaiters;	/* number of waiters */
	struct PGPROC *owner;		/* last exclusive owner of the lock */
#endif
} LWLock;

/*
 * In most cases, it's desirable to force each tranche of LWLocks to be aligned
 * on a cache line boundary and make the array stride a power of 2.  This saves
 * a few cycles in indexing, but more importantly ensures that individual
 * LWLocks don't cross cache line boundaries.  This reduces cache contention
 * problems, especially on AMD Opterons.  In some cases, it's worth adding
 * even more padding so that each LWLock takes up an entire cache line; this
 * helps, for example, in the main LWLock array, where the overall number of
 * locks is small but some are heavily contended.
 *
 * When allocating a tranche that contains data other than LWLocks, it is
 * probably best to include a bare LWLock and then pad the resulting structure
 * as necessary for performance.  For an array that contains only LWLocks,
 * LWLockMinimallyPadded can be used for cases where we just want to ensure
 * that we don't cross cache line boundaries within a single lock, while
 * LWLockPadded can be used for cases where we want each lock to be an entire
 * cache line.
 *
 * An LWLockMinimallyPadded might contain more than the absolute minimum amount
 * of padding required to keep a lock from crossing a cache line boundary,
 * because an unpadded LWLock will normally fit into 16 bytes.  We ignore that
 * possibility when determining the minimal amount of padding.  Older releases
 * had larger LWLocks, so 32 really was the minimum, and packing them in
 * tighter might hurt performance.
 *
 * LWLOCK_MINIMAL_SIZE should be 32 on basically all common platforms, but
 * because pg_atomic_uint32 is more than 4 bytes on some obscure platforms, we
 * allow for the possibility that it might be 64.  Even on those platforms,
 * we probably won't exceed 32 bytes unless LOCK_DEBUG is defined.
 */
#define LWLOCK_PADDED_SIZE	PG_CACHE_LINE_SIZE
#define LWLOCK_MINIMAL_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)
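
/*
 * For a concrete sense of the arithmetic (assuming a typical 64-bit platform
 * without LOCK_DEBUG): uint16 tranche occupies 2 bytes plus 2 bytes of
 * alignment padding, pg_atomic_uint32 state occupies 4, and proclist_head
 * waiters is two 4-byte list indexes, giving 16 bytes in all, so
 * LWLOCK_MINIMAL_SIZE evaluates to 32.
 */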

/* LWLock, padded to a full cache line size */
typedef union LWLockPadded
{
	LWLock		lock;
	char		pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

/* LWLock, minimally padded */
typedef union LWLockMinimallyPadded
{
	LWLock		lock;
	char		pad[LWLOCK_MINIMAL_SIZE];
} LWLockMinimallyPadded;
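
/*
 * As an illustration of the padding advice above, a tranche entry carrying
 * data besides the lock itself might embed a bare LWLock and pad the whole
 * union out to a cache line.  This is only a sketch; the type and field
 * names are hypothetical, not part of this header, and 'counter' stands for
 * whatever shared data the lock protects:
 *
 *	typedef union MyTrancheEntry
 *	{
 *		struct
 *		{
 *			LWLock		lock;
 *			uint64		counter;
 *		}			data;
 *		char		pad[PG_CACHE_LINE_SIZE];
 *	} MyTrancheEntry;
 */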

extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
extern const char *const MainLWLockNames[];

/* struct for storing named tranche information */
typedef struct NamedLWLockTranche
{
	int			trancheId;
	char	   *trancheName;
} NamedLWLockTranche;

extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
extern PGDLLIMPORT int NamedLWLockTrancheRequests;

/* Names for fixed lwlocks */
#include "storage/lwlocknames.h"

/*
 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
 * here, but we need them to figure out offsets within MainLWLockArray, and
 * having this file include lock.h or bufmgr.h would be backwards.
 */

/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS  128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS  4
#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET	NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
	(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
	(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
	(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
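
/*
 * For example, the buffer mapping partition locks occupy
 * MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET] through
 * MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS - 1].
 * A sketch of picking one partition's lock from a hash code (the hashcode
 * variable is hypothetical; callers normally wrap this in their own macro):
 *
 *	LWLock *partitionLock =
 *		&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET +
 *						 hashcode % NUM_BUFFER_PARTITIONS].lock;
 */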

typedef enum LWLockMode
{
	LW_EXCLUSIVE,
	LW_SHARED,
	LW_WAIT_UNTIL_FREE			/* A special mode used in PGPROC->lwlockMode,
								 * when waiting for lock to become free. Not
								 * to be used as LWLockAcquire argument */
} LWLockMode;


#ifdef LOCK_DEBUG
extern bool Trace_lwlocks;
#endif

extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
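
/*
 * Typical critical-section usage, sketched here for orientation (my_lock and
 * the guarded work are hypothetical):
 *
 *	LWLockAcquire(my_lock, LW_EXCLUSIVE);
 *	... read or modify the shared state guarded by my_lock ...
 *	LWLockRelease(my_lock);
 *
 * LWLockConditionalAcquire() is the non-blocking variant: it returns false
 * instead of sleeping when the lock cannot be granted immediately.
 */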

extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval,
							 uint64 *newval);
extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);
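
/*
 * These two support the protocol used by WAL insertion locks: the exclusive
 * holder publishes its progress through *valptr with LWLockUpdateVar(), and
 * a waiter sleeps in LWLockWaitForVar() until either the lock is released
 * (which returns true) or the value no longer matches oldval (which returns
 * false, with the current value stored in *newval).  A hedged sketch of a
 * waiter's loop, where insertpos is a hypothetical shared variable:
 *
 *	uint64		seen = 0;
 *	uint64		current;
 *
 *	while (!LWLockWaitForVar(lock, &insertpos, seen, &current))
 *		seen = current;
 *
 * Each false return means the lock is still held but insertpos has advanced,
 * so the waiter records the new value and waits again.
 */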

extern Size LWLockShmemSize(void);
extern void CreateLWLocks(void);
extern void InitLWLockAccess(void);

extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);

/*
 * Extensions (or core code) can obtain LWLocks by calling
 * RequestNamedLWLockTranche() during postmaster startup.  Subsequently,
 * call GetNamedLWLockTranche() to obtain a pointer to an array of the
 * requested number of LWLocks.
 */
extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
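
/*
 * A sketch of this pattern for an extension loaded via
 * shared_preload_libraries (the tranche name and lock count are
 * hypothetical):
 *
 *	void
 *	_PG_init(void)
 *	{
 *		RequestNamedLWLockTranche("my_extension", 4);
 *	}
 *
 * Then, once shared memory has been set up (e.g. from a shmem startup hook):
 *
 *	LWLockPadded *locks = GetNamedLWLockTranche("my_extension");
 *
 *	LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
 */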

/*
 * There is another, more flexible method of obtaining lwlocks.  First, call
 * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
 * a shared counter.  Next, each individual process using the tranche should
 * call LWLockRegisterTranche() to associate that tranche ID with a name.
 * Finally, LWLockInitialize should be called just once per lwlock, passing
 * the tranche ID as an argument.
 *
 * It may seem strange that each process using the tranche must register it
 * separately, but dynamic shared memory segments aren't guaranteed to be
 * mapped at the same address in all coordinating backends, so storing the
 * registration in the main shared memory segment wouldn't work for that case.
 */
extern int	LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
extern void LWLockInitialize(LWLock *lock, int tranche_id);
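
/*
 * A sketch of those three steps for a lock living in a DSM segment, where
 * 'shared' and the tranche name are hypothetical:
 *
 *	In the process that creates the shared state:
 *
 *		shared->tranche_id = LWLockNewTrancheId();
 *		LWLockInitialize(&shared->lock, shared->tranche_id);
 *
 *	In every process that uses the lock, including the creator:
 *
 *		LWLockRegisterTranche(shared->tranche_id, "my_dsm_tranche");
 */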

/*
 * Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
 * we reserve additional tranche IDs for builtin tranches not included in
 * the set of individual LWLocks.  A call to LWLockNewTrancheId will never
 * return a value less than LWTRANCHE_FIRST_USER_DEFINED.
 */
typedef enum BuiltinTrancheIds
{
	LWTRANCHE_CLOG_BUFFERS = NUM_INDIVIDUAL_LWLOCKS,
	LWTRANCHE_COMMITTS_BUFFERS,
	LWTRANCHE_SUBTRANS_BUFFERS,
	LWTRANCHE_MXACTOFFSET_BUFFERS,
	LWTRANCHE_MXACTMEMBER_BUFFERS,
	LWTRANCHE_ASYNC_BUFFERS,
	LWTRANCHE_OLDSERXID_BUFFERS,
	LWTRANCHE_WAL_INSERT,
	LWTRANCHE_BUFFER_CONTENT,
	LWTRANCHE_BUFFER_IO_IN_PROGRESS,
	LWTRANCHE_REPLICATION_ORIGIN,
	LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
	LWTRANCHE_PROC,
	LWTRANCHE_BUFFER_MAPPING,
	LWTRANCHE_LOCK_MANAGER,
	LWTRANCHE_PREDICATE_LOCK_MANAGER,
	LWTRANCHE_PARALLEL_HASH_JOIN,
	LWTRANCHE_PARALLEL_QUERY_DSA,
	LWTRANCHE_SESSION_DSA,
	LWTRANCHE_SESSION_RECORD_TABLE,
	LWTRANCHE_SESSION_TYPMOD_TABLE,
	LWTRANCHE_SHARED_TUPLESTORE,
	LWTRANCHE_TBM,
	LWTRANCHE_PARALLEL_APPEND,
	LWTRANCHE_SXACT,
	LWTRANCHE_FIRST_USER_DEFINED
} BuiltinTrancheIds;

/*
 * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
 * to LWLocks.  New code should instead use LWLock *.  However, for the
 * convenience of third-party code, we include the following typedef.
 */
typedef LWLock *LWLockId;

#endif							/* LWLOCK_H */