/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Hardware-dependent implementation of spinlocks.
 *
 * NOTE: none of the macros in this file are intended to be called directly.
 * Call them through the hardware-independent macros in spin.h.
 *
 * The following hardware-dependent macros must be provided for each
 * supported platform:
 *
 *	void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 *	int S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *		Should return number of "delays"; see s_lock.c
 *
 *	void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 *	bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free. Returns true if free, false if locked.
 *		This does *not* change the state of the lock.
 *
 *	void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
 *
 * Note to implementors: there are default implementations for all these
 * macros at the bottom of the file. Check if your platform can use
 * these or needs to override them.
 *
 * Usually, S_LOCK() is implemented in terms of even lower-level macros
 * TAS() and TAS_SPIN():
 *
 *	int TAS(slock_t *lock)
 *		Atomic test-and-set instruction. Attempt to acquire the lock,
 *		but do *not* wait. Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 *	int TAS_SPIN(slock_t *lock)
 *		Like TAS(), but this version is used when waiting for a lock
 *		previously found to be contended. By default, this is the
 *		same as TAS(), but on some architectures it's better to poll a
 *		contended lock using an unlocked instruction and retry the
 *		atomic test-and-set only when it appears free.
 *
 * TAS() and TAS_SPIN() are NOT part of the API, and should never be called
 * directly.
 *
 * CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
 * failure to acquire a lock even when the lock is not locked. For example,
 * on Alpha TAS() will "fail" if interrupted. Therefore a retry loop must
 * always be used, even if you are certain the lock is free.
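 *
 * For illustration, the wait loop in s_lock.c is built along these lines
 * (a simplified sketch; the real loop also counts spins and delays, and
 * will eventually time out, per the S_LOCK() contract above):
 *
 *		while (TAS_SPIN(lock))
 *			SPIN_DELAY();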
 *
 * It is the responsibility of these macros to make sure that the compiler
 * does not re-order accesses to shared memory to precede the actual lock
 * acquisition, or follow the lock release. Prior to PostgreSQL 9.5, this
 * was the caller's responsibility, which meant that callers had to use
 * volatile-qualified pointers to refer to both the spinlock itself and the
 * shared data being accessed within the spinlocked critical section. This
 * was notationally awkward, easy to forget (and thus error-prone), and
 * prevented some useful compiler optimizations. For these reasons, we
 * now require that the macros themselves prevent compiler re-ordering,
 * so that the caller doesn't need to take special precautions.
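 *
 * For example, given a hypothetical shared struct with an slock_t member,
 * a caller (going through the spin.h wrappers, per the NOTE above) can now
 * safely write, with no volatile qualifiers:
 *
 *		SpinLockAcquire(&shared->mutex);
 *		shared->counter++;
 *		SpinLockRelease(&shared->mutex);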
 *
 * On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
 * S_UNLOCK() macros must further include hardware-level memory fence
 * instructions to prevent similar re-ordering at the hardware level.
 * TAS() and TAS_SPIN() must guarantee that loads and stores issued after
 * the macro are not executed until the lock has been obtained. Conversely,
 * S_UNLOCK() must guarantee that loads and stores issued before the macro
 * have been executed before the lock is released.
 *
 * On most supported platforms, TAS() uses a tas() function written
 * in assembly language to execute a hardware atomic-test-and-set
 * instruction. Equivalent OS-supplied mutex routines could be used too.
 *
 * If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 * defined), then we fall back on an emulation that uses SysV semaphores
 * (see spin.c). This emulation will be MUCH MUCH slower than a proper TAS()
 * implementation, because of the cost of a kernel call per lock or unlock.
 * An old report is that Postgres spends around 40% of its time in semop(2)
 * when using the SysV semaphore code.
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  src/include/storage/s_lock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef S_LOCK_H
#define S_LOCK_H

#ifdef FRONTEND
#error "s_lock.h may not be included from frontend code"
#endif

#ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__, so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):

	__asm__ __volatile__(
		"	instruction	\n"
		"	instruction	\n"
		"	instruction	\n"
:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
:		"r"(lock)					// lock pointer, in input register
:		"memory", "cc");			// show clobbered registers here

 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly. This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code. Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code. Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */


#ifdef __i386__		/* 32-bit i386 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * Use a non-locking test before asserting the bus lock. Note that the
	 * extra test appears to be a small loss on some x86 platforms and a small
	 * win on others; it's by no means clear that we should keep it.
	 *
	 * When this was last tested, we didn't have separate TAS() and TAS_SPIN()
	 * macros. Nowadays it probably would be better to do a non-locking test
	 * in TAS_SPIN() but not in TAS(), like on x86_64, but no-one's done the
	 * testing to verify that. Without some empirical evidence, better to
	 * leave it alone.
	 */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne		1f		\n"
		"	lock			\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *     The PAUSE instruction improves the performance of IA-32
	 *     processors supporting Hyper-Threading Technology when
	 *     executing spin-wait loops and other routines where one
	 *     thread is accessing a shared lock or semaphore in a tight
	 *     polling loop. When executing a spin-wait loop, the
	 *     processor can suffer a severe performance penalty when
	 *     exiting the loop because it detects a possible memory order
	 *     violation and flushes the core processor's pipeline. The
	 *     PAUSE instruction provides a hint to the processor that the
	 *     code sequence is a spin-wait loop. The processor uses this
	 *     hint to avoid the memory order violation and prevent the
	 *     pipeline flush. In addition, the PAUSE instruction
	 *     de-pipelines the spin-wait loop to prevent it from
	 *     consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop \n");
}

#endif	 /* __i386__ */


#ifdef __x86_64__		/* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

/*
 * On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
 * but only when spinning.
 *
 * See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
 * and IA32, by Michael Chynoweth and Mary R. Lee. As of this writing, it is
 * available at:
 * http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
 */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	lock			\n"
		"	xchgb	%0,%1	\n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
	 * Opteron, but it may be of some use on EM64T, so we keep it.
	 */
	__asm__ __volatile__(
		" rep; nop \n");
}

#endif	 /* __x86_64__ */


#if defined(__ia64__) || defined(__ia64)
/*
 * Intel Itanium, gcc or Intel's compiler.
 *
 * Itanium has weak memory ordering, but we rely on the compiler to enforce
 * strict ordering of accesses to volatile data. In particular, while the
 * xchg instruction implicitly acts as a memory barrier with 'acquire'
 * semantics, we do not have an explicit memory fence instruction in the
 * S_UNLOCK macro. We use a regular assignment to clear the spinlock, and
 * trust that the compiler marks the generated store instruction with the
 * ".rel" opcode.
 *
 * Testing shows that assumption to hold on gcc, although I could not find
 * any explicit statement on that in the gcc manual. In Intel's compiler,
 * the -m[no-]serialize-volatile option controls that, and testing shows that
 * it is enabled by default.
 *
 * While icc accepts gcc asm blocks on x86[_64], this is not true on ia64
 * (at least not in icc versions before 12.x). So we have to carry a separate
 * compiler-intrinsic-based implementation for it.
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

#ifndef __INTEL_COMPILER

static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	__asm__ __volatile__(
		"	xchg4	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}

#else /* __INTEL_COMPILER */

static __inline__ int
tas(volatile slock_t *lock)
{
	int			ret;

	ret = _InterlockedExchange(lock, 1);	/* this is a xchg asm macro */

	return ret;
}

/* icc can't use the regular gcc S_UNLOCK() macro either in this case */
#define S_UNLOCK(lock)	\
	do { __memory_barrier(); *(lock) = 0; } while (0)

#endif /* __INTEL_COMPILER */
#endif	 /* __ia64__ || __ia64 */

/*
 * On ARM and ARM64, we use __sync_lock_test_and_set(int *, int) if available.
 *
 * We use the int-width variant of the builtin because it works on more chips
 * than other widths.
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__) || defined(__aarch64)
#ifdef HAVE_GCC__SYNC_INT32_TAS
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

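/*
 * Per the gcc documentation, __sync_lock_test_and_set() is an acquire
 * barrier and __sync_lock_release() stores 0 with release semantics,
 * which is exactly the TAS()/S_UNLOCK() ordering contract described at
 * the top of this file.
 */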
#define S_UNLOCK(lock) __sync_lock_release(lock)

#endif	 /* HAVE_GCC__SYNC_INT32_TAS */
#endif	 /* __arm__ || __arm || __aarch64__ || __aarch64 */


/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int			_res = 0;

	__asm__ __volatile__(
		"	cs	%0,%3,0(%2)	\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */


#if defined(__sparc__)		/* Sparc */
/*
 * Solaris has always run SPARC processors in TSO (total store order) mode,
 * but Linux did not always, and the *BSDs still don't. So, be careful about
 * acquire/release semantics. The CPU will treat superfluous membars as
 * NOPs, so the only cost is code space.
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	/*
	 * See comment in src/backend/port/tas/sunstudio_sparc.s for why this
	 * uses "ldstub", and that file uses "cas". gcc currently generates
	 * sparcv7-targeted binaries, so "cas" use isn't possible.
	 */
	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
#if defined(__sparcv7) || defined(__sparc_v7__)
	/*
	 * No stbar or membar available; luckily no hardware that was actually
	 * produced requires a barrier.
	 */
#elif defined(__sparcv8) || defined(__sparc_v8__)
	/* stbar is available (and required for both PSO, RMO), membar isn't */
	__asm__ __volatile__ ("stbar \n":::"memory");
#else
	/*
	 * #LoadStore (RMO) | #LoadLoad (RMO) together are the appropriate acquire
	 * barrier for sparcv8+ upwards.
	 */
	__asm__ __volatile__ ("membar #LoadStore | #LoadLoad \n":::"memory");
#endif
	return (int) _res;
}

#if defined(__sparcv7) || defined(__sparc_v7__)
/*
 * No stbar or membar available; luckily no hardware that was actually
 * produced requires a barrier. We fall through to the default gcc
 * definition of S_UNLOCK in this case.
 */
#elif defined(__sparcv8) || defined(__sparc_v8__)
/* stbar is available (and required for both PSO, RMO), membar isn't */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("stbar \n":::"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
/*
 * #LoadStore (RMO) | #StoreStore (RMO, PSO) together are the appropriate
 * release barrier for sparcv8+ upwards.
 */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("membar #LoadStore | #StoreStore \n":::"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif

#endif	 /* __sparc__ */


/* PowerPC */
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On PPC, it's a win to use a non-locking test before the lwarx */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

/*
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 * On newer machines, we can use lwsync instead for better performance.
 *
 * Ordinarily, we'd code the branches here using GNU-style local symbols, that
 * is "1f" referencing "1:" and so on. But some people run gcc on AIX with
 * IBM's assembler as backend, and IBM's assembler doesn't do local symbols.
 * So hand-code the branch offsets; fortunately, all PPC instructions are
 * exactly 4 bytes each, so it's not too hard to count.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t		_t;
	int			_res;

	__asm__ __volatile__(
#ifdef USE_PPC_LWARX_MUTEX_HINT
"	lwarx   %0,0,%3,1	\n"
#else
"	lwarx   %0,0,%3		\n"
#endif
"	cmpwi   %0,0		\n"
"	bne     $+16		\n"		/* branch to li %1,1 */
"	addi    %0,%0,1		\n"
"	stwcx.  %0,0,%3		\n"
"	beq     $+12		\n"		/* branch to lwsync/isync */
"	li      %1,1		\n"
"	b       $+12		\n"		/* branch to end of asm sequence */
#ifdef USE_PPC_LWSYNC
"	lwsync				\n"
#else
"	isync				\n"
#endif
"	li      %1,0		\n"

:	"=&r"(_t), "=r"(_res), "+m"(*lock)
:	"r"(lock)
:	"memory", "cc");
	return _res;
}

/*
 * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
 * On newer machines, we can use lwsync instead for better performance.
 */
#ifdef USE_PPC_LWSYNC
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	lwsync \n" ::: "memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	sync \n" ::: "memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif /* USE_PPC_LWSYNC */

#endif /* powerpc */


/* Linux Motorola 68k */
#if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__ __volatile__(
		"	clrl	%0		\n"
		"	tas		%1		\n"
		"	sne		%0		\n"
:		"=d"(rv), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return rv;
}

#endif	 /* (__mc68000__ || __m68k__) && __linux__ */


/* Motorola 88k */
#if defined(__m88k__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	xmem	%0, %2, %%r0	\n"
:		"+r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __m88k__ */


/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#if defined(__vax__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int _res;

	__asm__ __volatile__(
		"	movl	$1, %0			\n"
		"	bbssi	$0, (%2), 1f	\n"
		"	clrl	%0				\n"
		"1: \n"
:		"=&r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return _res;
}

#endif	 /* __vax__ */


#if defined(__mips__) && !defined(__sgi)	/* non-SGI MIPS */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/*
 * Original MIPS-I processors lacked the LL/SC instructions, but if we are
 * so unfortunate as to be running on one of those, we expect that the kernel
 * will handle the illegal-instruction traps and emulate them for us. On
 * anything newer (and really, MIPS-I is extinct) LL/SC is the only sane
 * choice because any other synchronization method must involve a kernel
 * call. Unfortunately, many toolchains still default to MIPS-I as the
 * codegen target; if the symbol __mips shows that that's the case, we
 * have to force the assembler to accept LL/SC.
 *
 * R10000 and up processors require a separate SYNC, which has the same
 * issues as LL/SC.
 */
#if __mips < 2
#define MIPS_SET_MIPS2	"       .set mips2          \n"
#else
#define MIPS_SET_MIPS2
#endif

static __inline__ int
tas(volatile slock_t *lock)
{
	register volatile slock_t *_l = lock;
	register int _res;
	register int _tmp;

	__asm__ __volatile__(
		"       .set push           \n"
		MIPS_SET_MIPS2
		"       .set noreorder      \n"
		"       .set nomacro        \n"
		"       ll      %0, %2      \n"
		"       or      %1, %0, 1   \n"
		"       sc      %1, %2      \n"
		"       xori    %1, 1       \n"
		"       or      %0, %0, %1  \n"
		"       sync                \n"
		"       .set pop              "
:		"=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:		/* no inputs */
:		"memory");
	return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__( \
		"       .set push           \n" \
		MIPS_SET_MIPS2 \
		"       .set noreorder      \n" \
		"       .set nomacro        \n" \
		"       sync                \n" \
		"       .set pop              " \
:		/* no outputs */ \
:		/* no inputs */	\
:		"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */


#if defined(__m32r__) && defined(HAVE_SYS_TAS_H)	/* Renesas' M32R */
#define HAS_TEST_AND_SET

#include <sys/tas.h>

typedef int slock_t;

#define TAS(lock) tas(lock)

#endif /* __m32r__ */


#if defined(__sh__)				/* Renesas' SuperH */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int _res;

	/*
	 * This asm is coded as if %0 could be any register, but actually SuperH
	 * restricts the target of xor-immediate to be R0. That's handled by
	 * the "z" constraint on _res.
	 */
	__asm__ __volatile__(
		"	tas.b @%2    \n"
		"	movt  %0     \n"
		"	xor   #1,%0  \n"
:		"=z"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory", "t");
	return _res;
}

#endif	 /* __sh__ */


/* These live in s_lock.c, but only for gcc */


#if defined(__m68k__) && !defined(__linux__)	/* non-Linux Motorola 68k */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif

/*
 * Default implementation of S_UNLOCK() for gcc/icc.
 *
 * Note that this implementation is unsafe for any platform that can reorder
 * a memory access (either load or store) after a following store. That
 * happens not to be possible on x86 and most legacy architectures (some are
 * single-processor!), but many modern systems have weaker memory ordering.
 * Those that do must define their own version of S_UNLOCK() rather than
 * relying on this one.
 */
#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)	\
	do { __asm__ __volatile__("" : : : "memory");  *(lock) = 0; } while (0)
#endif

#endif	/* defined(__GNUC__) || defined(__INTEL_COMPILER) */



/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)	/* We didn't trigger above, let's try here */


#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX. Because
 * LDCWX requires a 16-byte-aligned address, we declare slock_t as a 16-byte
 * struct. The active word in the struct is whichever has the aligned address;
 * the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
	int			sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)	((volatile int *) (((uintptr_t) (lock) + 15) & ~15))
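
/*
 * For example (illustrative addresses only): if the slock_t starts at
 * 0x1004, the struct occupies 0x1004..0x1013, and TAS_ACTIVE_WORD yields
 * (0x1004 + 15) & ~15 = 0x1010, the single 16-byte-aligned word within it.
 */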

#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
	volatile int *lockword = TAS_ACTIVE_WORD(lock);
	register int lockval;

	__asm__ __volatile__(
		"	ldcwx	0(0,%2),%0	\n"
:		"=r"(lockval), "+m"(*lockword)
:		"r"(lockword)
:		"memory");
	return (lockval == 0);
}

/*
 * The hppa implementation doesn't follow the rules of this file: to avoid
 * duplication between the HP compiler and gcc, it provides its gcc-specific
 * S_UNLOCK() here rather than in the defined(__GNUC__) section above. So
 * undefine the generic fallback S_UNLOCK from above.
 */
#ifdef S_UNLOCK
#undef S_UNLOCK
#endif
#define S_UNLOCK(lock)	\
	do { \
		__asm__ __volatile__("" : : : "memory"); \
		*TAS_ACTIVE_WORD(lock) = -1; \
	} while (0)

#endif /* __GNUC__ */

#define S_INIT_LOCK(lock) \
	do { \
		volatile slock_t *lock_ = (lock); \
		lock_->sema[0] = -1; \
		lock_->sema[1] = -1; \
		lock_->sema[2] = -1; \
		lock_->sema[3] = -1; \
	} while (0)

#define S_LOCK_FREE(lock)	(*TAS_ACTIVE_WORD(lock) != 0)

#endif	 /* __hppa || __hppa__ */


#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
/*
 * HP-UX on Itanium, non-gcc/icc compiler
 *
 * We assume that the compiler enforces strict ordering of loads/stores on
 * volatile data (see comments on the gcc-version earlier in this file).
 * Note that this assumption does *not* hold if you use the
 * +Ovolatile=__unordered option on the HP-UX compiler, so don't do that.
 *
 * See also Implementing Spinlocks on the Intel Itanium Architecture and
 * PA-RISC, by Tor Ekqvist and David Graves, for more information. As of
 * this writing, version 1.0 of the manual is available at:
 * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))
#define S_UNLOCK(lock)	\
	do { _Asm_mf(); (*(lock)) = 0; } while (0)

#endif	/* HPUX on IA64, non gcc/icc */

#if defined(_AIX)	/* AIX */
/*
 * AIX (POWER)
 */
#define HAS_TEST_AND_SET

#include <sys/atomic_op.h>

typedef int slock_t;

#define TAS(lock)			_check_lock((slock_t *) (lock), 0, 1)
#define S_UNLOCK(lock)		_clear_lock((slock_t *) (lock), 0)
#endif	 /* _AIX */


/* These are in sunstudio_(sparc|x86).s */

#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
							 slock_t cmp);

#define TAS(a) (pg_atomic_cas((a), 1, 0) != 0)
#endif


#ifdef _MSC_VER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

/* If using Visual C++ on Win64, inline assembly is unavailable.
 * Use a _mm_pause intrinsic instead of rep nop.
 */
#if defined(_WIN64)
static __forceinline void
spin_delay(void)
{
	_mm_pause();
}
#else
static __forceinline void
spin_delay(void)
{
	/* See comment for gcc code. Same code, MASM syntax */
	__asm rep nop;
}
#endif

#include <intrin.h>
#pragma intrinsic(_ReadWriteBarrier)

#define S_UNLOCK(lock)	\
	do { _ReadWriteBarrier(); (*(lock)) = 0; } while (0)

#endif


#endif	/* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform. To continue the compilation, rerun configure using --disable-spinlocks. However, performance will be poor. Please report this to pgsql-bugs@lists.postgresql.org.
#endif


#else	/* !HAVE_SPINLOCKS */


/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must! The subroutines appear in spin.c.
 */
typedef int slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock, bool nested);
extern int	tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)	s_lock_free_sema(lock)
#define S_UNLOCK(lock)		s_unlock_sema(lock)
#define S_INIT_LOCK(lock)	s_init_lock_sema(lock, false)
#define TAS(lock)			tas_sema(lock)


#endif	/* HAVE_SPINLOCKS */


/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
	(TAS(lock) ? s_lock((lock), __FILE__, __LINE__, PG_FUNCNAME_MACRO) : 0)
#endif	 /* S_LOCK */

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
/*
 * Our default implementation of S_UNLOCK is essentially *(lock) = 0. This
 * is unsafe if the platform can reorder a memory access (either load or
 * store) after a following store; platforms where this is possible must
 * define their own S_UNLOCK. But CPU reordering is not the only concern:
 * if we simply defined S_UNLOCK() as an inline macro, the compiler might
 * reorder instructions from inside the critical section to occur after the
 * lock release. Since the compiler probably can't know what the external
 * function s_unlock is doing, putting the same logic there should be adequate.
 * A sufficiently-smart globally optimizing compiler could break that
 * assumption, though, and the cost of a function call for every spinlock
 * release may hurt performance significantly, so we use this implementation
 * only for platforms where we don't know of a suitable intrinsic. For the
 * most part, those are relatively obscure platform/compiler combinations to
 * which the PostgreSQL project does not have access.
 */
#define USE_DEFAULT_S_UNLOCK
extern void s_unlock(volatile slock_t *lock);
#define S_UNLOCK(lock)		s_unlock(lock)
#endif	 /* S_UNLOCK */
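
/*
 * For reference, the out-of-line s_unlock() in s_lock.c is essentially the
 * plain releasing store; the function-call boundary itself is what supplies
 * the compiler barrier discussed above.
 */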

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()	((void) 0)
#endif	 /* SPIN_DELAY */

#if !defined(TAS)
extern int	tas(volatile slock_t *lock);	/* in port/.../tas.s, or
											 * s_lock.c */

#define TAS(lock)		tas(lock)
#endif	 /* TAS */

#if !defined(TAS_SPIN)
#define TAS_SPIN(lock)	TAS(lock)
#endif	 /* TAS_SPIN */

extern slock_t dummy_spinlock;

/*
 * Platform-independent out-of-line support routines
 */
extern int	s_lock(volatile slock_t *lock, const char *file, int line, const char *func);

/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int	update_spins_per_delay(int shared_spins_per_delay);
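
/*
 * In the backend, set_spins_per_delay() installs the estimate read from
 * shared memory at process start, and update_spins_per_delay() folds a
 * backend's own spin experience back into that shared estimate (see
 * s_lock.c for the averaging rule).
 */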

/*
 * Support for spin delay, which is useful in various places where
 * spinlock-like procedures take place.
 */
typedef struct
{
	int			spins;
	int			delays;
	int			cur_delay;
	const char *file;
	int			line;
	const char *func;
} SpinDelayStatus;

static inline void
init_spin_delay(SpinDelayStatus *status,
				const char *file, int line, const char *func)
{
	status->spins = 0;
	status->delays = 0;
	status->cur_delay = 0;
	status->file = file;
	status->line = line;
	status->func = func;
}

#define init_local_spin_delay(status) init_spin_delay(status, __FILE__, __LINE__, PG_FUNCNAME_MACRO)
void perform_spin_delay(SpinDelayStatus *status);
void finish_spin_delay(SpinDelayStatus *status);
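
/*
 * Typical use of the spin-delay support, as a sketch (the loop condition
 * shown is a hypothetical caller-supplied test, not part of this API):
 *
 *		SpinDelayStatus delayStatus;
 *
 *		init_local_spin_delay(&delayStatus);
 *		while (!resource_is_free())
 *			perform_spin_delay(&delayStatus);
 *		finish_spin_delay(&delayStatus);
 */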

#endif	 /* S_LOCK_H */