Commit befa3e6

Revert 9.5 pgindent changes to atomics directory files
This is because there are many __asm__ blocks there that pgindent messes up. Also configure pgindent to skip that directory in the future.
1 parent 2aa0476 commit befa3e6

File tree: 10 files changed, +182 / -218 lines


src/include/port/atomics/arch-ia64.h

Lines changed: 3 additions & 3 deletions
@@ -18,9 +18,9 @@
  * fence.
  */
 #if defined(__INTEL_COMPILER)
-#define pg_memory_barrier_impl() __mf()
+# define pg_memory_barrier_impl() __mf()
 #elif defined(__GNUC__)
-#define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
+# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
 #elif defined(__hpux)
-#define pg_memory_barrier_impl() _Asm_mf()
+# define pg_memory_barrier_impl() _Asm_mf()
 #endif
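
(For orientation: pg_memory_barrier_impl() is the per-platform definition behind the generic pg_memory_barrier() macro in atomics.h. The sketch below is illustrative only and not part of this commit; publish_payload/consume_payload are invented names, and the static variables merely stand in for shared memory.)

#include "postgres.h"
#include "port/atomics.h"

/* Illustrative only: in real backend code these would live in shared memory. */
static int  shared_payload;
static volatile int payload_ready;

static void
publish_payload(int value)
{
    shared_payload = value;
    pg_memory_barrier();        /* order the data store before the flag store */
    payload_ready = 1;
}

static void
consume_payload(void)
{
    if (payload_ready)
    {
        pg_memory_barrier();    /* order the flag load before the data load */
        elog(LOG, "payload is %d", shared_payload);
    }
}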

src/include/port/atomics/arch-x86.h

Lines changed: 61 additions & 70 deletions
@@ -78,10 +78,9 @@ typedef struct pg_atomic_uint64
 } pg_atomic_uint64;
 #endif
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
-#endif /* defined(__GNUC__) &&
-        * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -94,20 +93,20 @@ typedef struct pg_atomic_uint64
  * PAUSE in the inner loop of a spin lock is necessary for good
  * performance:
  *
- * The PAUSE instruction improves the performance of IA-32
- * processors supporting Hyper-Threading Technology when
- * executing spin-wait loops and other routines where one
- * thread is accessing a shared lock or semaphore in a tight
- * polling loop. When executing a spin-wait loop, the
- * processor can suffer a severe performance penalty when
- * exiting the loop because it detects a possible memory order
- * violation and flushes the core processor's pipeline. The
- * PAUSE instruction provides a hint to the processor that the
- * code sequence is a spin-wait loop. The processor uses this
- * hint to avoid the memory order violation and prevent the
- * pipeline flush. In addition, the PAUSE instruction
- * de-pipelines the spin-wait loop to prevent it from
- * consuming execution resources excessively.
+ * The PAUSE instruction improves the performance of IA-32
+ * processors supporting Hyper-Threading Technology when
+ * executing spin-wait loops and other routines where one
+ * thread is accessing a shared lock or semaphore in a tight
+ * polling loop. When executing a spin-wait loop, the
+ * processor can suffer a severe performance penalty when
+ * exiting the loop because it detects a possible memory order
+ * violation and flushes the core processor's pipeline. The
+ * PAUSE instruction provides a hint to the processor that the
+ * code sequence is a spin-wait loop. The processor uses this
+ * hint to avoid the memory order violation and prevent the
+ * pipeline flush. In addition, the PAUSE instruction
+ * de-pipelines the spin-wait loop to prevent it from
+ * consuming execution resources excessively.
  */
 #if defined(__INTEL_COMPILER)
 #define PG_HAVE_SPIN_DELAY
@@ -121,8 +120,8 @@ pg_spin_delay_impl(void)
 static __inline__ void
 pg_spin_delay_impl(void)
 {
-    __asm__ __volatile__(
-        " rep; nop \n");
+    __asm__ __volatile__(
+        " rep; nop \n");
 }
 #elif defined(WIN32_ONLY_COMPILER) && defined(__x86_64__)
 #define PG_HAVE_SPIN_DELAY
@@ -137,10 +136,10 @@ static __forceinline void
 pg_spin_delay_impl(void)
 {
     /* See comment for gcc code. Same code, MASM syntax */
-    __asm rep nop;
+    __asm rep nop;
 }
 #endif
-#endif /* !defined(PG_HAVE_SPIN_DELAY) */
+#endif /* !defined(PG_HAVE_SPIN_DELAY) */
 
 
 #if defined(HAVE_ATOMICS)
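
(Both pg_spin_delay_impl() variants above emit PAUSE, i.e. rep; nop, which is exactly what the quoted Intel comment recommends for tight polling loops. A hypothetical caller-side sketch, assuming the generic pg_spin_delay() and pg_atomic_read_u32() wrappers from atomics.h; wait_for_nonzero is an invented name and not part of this commit.)

#include "postgres.h"
#include "port/atomics.h"

/* Spin until another process sets *v to a non-zero value. */
static void
wait_for_nonzero(volatile pg_atomic_uint32 *v)
{
    while (pg_atomic_read_u32(v) == 0)
        pg_spin_delay();        /* PAUSE: keeps the polling loop cheap and
                                 * avoids the pipeline flush on loop exit */
}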
@@ -154,13 +153,12 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 {
     register char _res = 1;
 
-    __asm__ __volatile__(
-        " lock \n"
-        " xchgb %0,%1 \n"
-        : "+q"(_res), "+m"(ptr->value)
-        :
-        : "memory");
-
+    __asm__ __volatile__(
+        " lock \n"
+        " xchgb %0,%1 \n"
+        : "+q"(_res), "+m"(ptr->value)
+        :
+        : "memory");
     return _res == 0;
 }
 
@@ -172,8 +170,7 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
      * On a TSO architecture like x86 it's sufficient to use a compiler
      * barrier to achieve release semantics.
      */
-    __asm__ __volatile__("":::"memory");
-
+    __asm__ __volatile__("" ::: "memory");
     ptr->value = 0;
 }
 
@@ -182,35 +179,33 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                     uint32 *expected, uint32 newval)
 {
-    char ret;
+    char ret;
 
     /*
      * Perform cmpxchg and use the zero flag which it implicitly sets when
      * equal to measure the success.
      */
-    __asm__ __volatile__(
-        " lock \n"
-        " cmpxchgl %4,%5 \n"
-        " setz %2 \n"
-        : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-        : "a"(*expected), "r"(newval), "m"(ptr->value)
-        : "memory", "cc");
-
+    __asm__ __volatile__(
+        " lock \n"
+        " cmpxchgl %4,%5 \n"
+        " setz %2 \n"
+        : "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+        : "a" (*expected), "r" (newval), "m"(ptr->value)
+        : "memory", "cc");
     return (bool) ret;
 }
 
 #define PG_HAVE_ATOMIC_FETCH_ADD_U32
 static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
-    uint32 res;
-    __asm__ __volatile__(
-        " lock \n"
-        " xaddl %0,%1 \n"
-        : "=q"(res), "=m"(ptr->value)
-        : "0"(add_), "m"(ptr->value)
-        : "memory", "cc");
-
+    uint32 res;
+    __asm__ __volatile__(
+        " lock \n"
+        " xaddl %0,%1 \n"
+        : "=q"(res), "=m"(ptr->value)
+        : "0" (add_), "m"(ptr->value)
+        : "memory", "cc");
     return res;
 }
 
@@ -221,44 +216,40 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                     uint64 *expected, uint64 newval)
 {
-    char ret;
+    char ret;
 
     /*
      * Perform cmpxchg and use the zero flag which it implicitly sets when
      * equal to measure the success.
      */
-    __asm__ __volatile__(
-        " lock \n"
-        " cmpxchgq %4,%5 \n"
-        " setz %2 \n"
-        : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-        : "a"(*expected), "r"(newval), "m"(ptr->value)
-        : "memory", "cc");
-
+    __asm__ __volatile__(
+        " lock \n"
+        " cmpxchgq %4,%5 \n"
+        " setz %2 \n"
+        : "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+        : "a" (*expected), "r" (newval), "m"(ptr->value)
+        : "memory", "cc");
     return (bool) ret;
 }
 
 #define PG_HAVE_ATOMIC_FETCH_ADD_U64
 static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
-    uint64 res;
-    __asm__ __volatile__(
-        " lock \n"
-        " xaddq %0,%1 \n"
-        : "=q"(res), "=m"(ptr->value)
-        : "0"(add_), "m"(ptr->value)
-        : "memory", "cc");
-
+    uint64 res;
+    __asm__ __volatile__(
+        " lock \n"
+        " xaddq %0,%1 \n"
+        : "=q"(res), "=m"(ptr->value)
+        : "0" (add_), "m"(ptr->value)
+        : "memory", "cc");
     return res;
 }
 
-#endif /* __x86_64__ */
+#endif /* __x86_64__ */
 
-#endif /* defined(__GNUC__) &&
-        * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
 
-#endif /* HAVE_ATOMICS */
+#endif /* HAVE_ATOMICS */
 
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
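
(The cmpxchg and xadd blocks above back PostgreSQL's generic 32- and 64-bit compare-exchange and fetch-add API. A hypothetical caller-side sketch of the usual compare-and-swap retry pattern follows; bounded_increment and plain_increment are invented helpers, not part of this commit.)

#include "postgres.h"
#include "port/atomics.h"

/*
 * Atomically increment *counter, but never past max.  Returns the value the
 * counter ends up holding.  On CAS failure, pg_atomic_compare_exchange_u32()
 * refreshes 'old' with the current value, so the loop simply retries.
 */
static uint32
bounded_increment(volatile pg_atomic_uint32 *counter, uint32 max)
{
    uint32      old = pg_atomic_read_u32(counter);

    while (old < max)
    {
        if (pg_atomic_compare_exchange_u32(counter, &old, old + 1))
            return old + 1;
    }
    return old;
}

/* Unconditional increments can use the xadd-backed fetch-add directly. */
static uint32
plain_increment(volatile pg_atomic_uint32 *counter)
{
    return pg_atomic_fetch_add_u32(counter, 1) + 1;
}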

src/include/port/atomics/fallback.h

Lines changed: 11 additions & 14 deletions
@@ -1,8 +1,8 @@
 /*-------------------------------------------------------------------------
  *
  * fallback.h
- *    Fallback for platforms without spinlock and/or atomics support. Slower
- *    than native atomics support, but not unusably slow.
+ *    Fallback for platforms without spinlock and/or atomics support. Slower
+ *    than native atomics support, but not unusably slow.
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -14,7 +14,7 @@
 
 /* intentionally no include guards, should only be included by atomics.h */
 #ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+# error "should be included via atomics.h"
 #endif
 
 #ifndef pg_memory_barrier_impl
@@ -75,15 +75,14 @@ typedef struct pg_atomic_flag
  * be content with just one byte instead of 4, but that's not too much
  * waste.
  */
-#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP
-                                             * compilers */
+#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP compilers */
     int sema[4];
 #else
     int sema;
 #endif
 } pg_atomic_flag;
 
-#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
 
 #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
 
@@ -93,16 +92,15 @@ typedef struct pg_atomic_flag
 typedef struct pg_atomic_uint32
 {
     /* Check pg_atomic_flag's definition above for an explanation */
-#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP
-                                             * compilers */
+#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP compilers */
     int sema[4];
 #else
     int sema;
 #endif
     volatile uint32 value;
 } pg_atomic_uint32;
 
-#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -130,7 +128,7 @@ pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
     return true;
 }
 
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
 
 #ifdef PG_HAVE_ATOMIC_U32_SIMULATION
 
@@ -139,13 +137,12 @@ extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
 
 #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
 extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
-                    uint32 *expected, uint32 newval);
+                                                uint32 *expected, uint32 newval);
 
 #define PG_HAVE_ATOMIC_FETCH_ADD_U32
 extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
 
-#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
 
 
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
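
(fallback.h only declares the simulated operations; their definitions live elsewhere in the backend and wrap a plain read-modify-write in the per-variable spinlock, the "sema" field embedded in the structs above. The following is a simplified sketch of that idea, not the actual fallback code, assuming the SpinLockAcquire/SpinLockRelease API from storage/spin.h.)

#include "postgres.h"
#include "port/atomics.h"
#include "storage/spin.h"

bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool        ret;

    /* Serialize against all other simulated operations on this variable. */
    SpinLockAcquire((slock_t *) &ptr->sema);

    if (ptr->value == *expected)
    {
        ptr->value = newval;
        ret = true;
    }
    else
    {
        *expected = ptr->value;     /* report the value we actually saw */
        ret = false;
    }

    SpinLockRelease((slock_t *) &ptr->sema);

    return ret;
}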
