
Commit a30e8bb

Move atomics and base wait files

1 parent: 7fc7125

File tree: 19 files changed, +2713 −2 lines


src/backend/port/Makefile

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ subdir = src/backend/port
 top_builddir = ../../..
 include $(top_builddir)/src/Makefile.global

-OBJS = dynloader.o pg_sema.o pg_shmem.o pg_latch.o $(TAS)
+OBJS = dynloader.o pg_sema.o pg_shmem.o pg_latch.o atomics.o $(TAS)

 ifeq ($(PORTNAME), darwin)
 SUBDIRS += darwin

src/backend/port/atomics.c

Lines changed: 148 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,148 @@
1+
/*-------------------------------------------------------------------------
2+
*
3+
* atomics.c
4+
* Non-Inline parts of the atomics implementation
5+
*
6+
* Portions Copyright (c) 2013-2015, PostgreSQL Global Development Group
7+
*
8+
*
9+
* IDENTIFICATION
10+
* src/backend/port/atomics.c
11+
*
12+
*-------------------------------------------------------------------------
13+
*/
14+
#include "postgres.h"
15+
16+
#include "miscadmin.h"
17+
#include "port/atomics.h"
18+
#include "storage/spin.h"
19+
20+
#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
21+
#ifdef WIN32
22+
#error "barriers are required (and provided) on WIN32 platforms"
23+
#endif
24+
#include <sys/types.h>
25+
#include <signal.h>
26+
#endif
27+
28+
#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
29+
void
30+
pg_spinlock_barrier(void)
31+
{
32+
/*
33+
* NB: we have to be reentrant here, some barriers are placed in signal
34+
* handlers.
35+
*
36+
* We use kill(0) for the fallback barrier as we assume that kernels on
37+
* systems old enough to require fallback barrier support will include an
38+
* appropriate barrier while checking the existence of the postmaster
39+
* pid.
40+
*/
41+
(void) kill(PostmasterPid, 0);
42+
}
43+
#endif
44+
45+
#ifdef PG_HAVE_COMPILER_BARRIER_EMULATION
46+
void
47+
pg_extern_compiler_barrier(void)
48+
{
49+
/* do nothing */
50+
}
51+
#endif
52+
53+
54+
#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION
55+
56+
void
57+
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
58+
{
59+
StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
60+
"size mismatch of atomic_flag vs slock_t");
61+
62+
#ifndef HAVE_SPINLOCKS
63+
64+
/*
65+
* NB: If we're using semaphore based TAS emulation, be careful to use a
66+
* separate set of semaphores. Otherwise we'd get in trouble if an atomic
67+
* var would be manipulated while spinlock is held.
68+
*/
69+
s_init_lock_sema((slock_t *) &ptr->sema, true);
70+
#else
71+
SpinLockInit((slock_t *) &ptr->sema);
72+
#endif
73+
}
74+
75+
bool
76+
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
77+
{
78+
return TAS((slock_t *) &ptr->sema);
79+
}
80+
81+
void
82+
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
83+
{
84+
S_UNLOCK((slock_t *) &ptr->sema);
85+
}
86+
87+
#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
88+
89+
#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
90+
void
91+
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
92+
{
93+
StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
94+
"size mismatch of atomic_flag vs slock_t");
95+
96+
/*
97+
* If we're using semaphore based atomic flags, be careful about nested
98+
* usage of atomics while a spinlock is held.
99+
*/
100+
#ifndef HAVE_SPINLOCKS
101+
s_init_lock_sema((slock_t *) &ptr->sema, true);
102+
#else
103+
SpinLockInit((slock_t *) &ptr->sema);
104+
#endif
105+
ptr->value = val_;
106+
}
107+
108+
bool
109+
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
110+
uint32 *expected, uint32 newval)
111+
{
112+
bool ret;
113+
114+
/*
115+
* Do atomic op under a spinlock. It might look like we could just skip
116+
* the cmpxchg if the lock isn't available, but that'd just emulate a
117+
* 'weak' compare and swap. I.e. one that allows spurious failures. Since
118+
* several algorithms rely on a strong variant and that is efficiently
119+
* implementable on most major architectures let's emulate it here as
120+
* well.
121+
*/
122+
SpinLockAcquire((slock_t *) &ptr->sema);
123+
124+
/* perform compare/exchange logic */
125+
ret = ptr->value == *expected;
126+
*expected = ptr->value;
127+
if (ret)
128+
ptr->value = newval;
129+
130+
/* and release lock */
131+
SpinLockRelease((slock_t *) &ptr->sema);
132+
133+
return ret;
134+
}
135+
136+
uint32
137+
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
138+
{
139+
uint32 oldval;
140+
141+
SpinLockAcquire((slock_t *) &ptr->sema);
142+
oldval = ptr->value;
143+
ptr->value += add_;
144+
SpinLockRelease((slock_t *) &ptr->sema);
145+
return oldval;
146+
}
147+
148+
#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */

src/backend/storage/lmgr/Makefile

Lines changed: 2 additions & 1 deletion
@@ -12,7 +12,8 @@ subdir = src/backend/storage/lmgr
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global

-OBJS = lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o predicate.o
+OBJS = lmgr.o lock.o proc.o deadlock.o lwlock.o spin.o s_lock.o \
+	predicate.o wait.o

 include $(top_srcdir)/src/backend/common.mk
