|
23 | 23 |
|
24 | 24 | #define INT_ACCESS_ONCE(var) ((int)(*((volatile int *)&(var)))) |
25 | 25 |
|
| 26 | +typedef struct ClockSweep |
| 27 | +{ |
| 28 | + pg_atomic_uint64 counter; /* Monotonically increasing; advanced by one per tick */ |
| 29 | + uint32 size; /* Number of buffers on the clock */ |
| 30 | +} ClockSweep; |
26 | 31 |
|
27 | 32 | /* |
28 | 33 | * The shared freelist control information. |
29 | 34 | */ |
30 | | -typedef struct { |
| 35 | +typedef struct |
| 36 | +{ |
31 | 37 | /* |
32 | | - * The clock-sweep hand is atomically updated by 1 at every tick. Use the |
33 | | - * macro CLOCK_HAND_POSITION() to find the next victim's index in the |
34 | | - * BufferDescriptor array. To calculate the number of times the clock-sweep |
35 | | - * hand has made a complete pass through all available buffers in the pool |
36 | | - * divide by NBuffers. |
| 38 | + * The next buffer available for use is determined by the clock-sweep |
| 39 | + * algorithm. |
37 | 40 | */ |
38 | | - pg_atomic_uint64 nextVictimBuffer; |
| 41 | + ClockSweep clock; |
39 | 42 |
|
40 | 43 | /* |
41 | 44 | * Statistics. These counters should be wide enough that they can't |
@@ -86,32 +89,40 @@ static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy, |
86 | 89 | static void AddBufferToRing(BufferAccessStrategy strategy, |
87 | 90 | BufferDesc *buf); |
88 | 91 |
|
89 | | -#define CLOCK_HAND_POSITION(counter) \ |
90 | | - ((counter) & 0xFFFFFFFF) % NBuffers |
| 92 | +static void |
| 93 | +ClockSweepInit(ClockSweep *sweep, uint32 size) |
| 94 | +{ |
| 95 | + pg_atomic_init_u64(&sweep->counter, 0); |
| 96 | + sweep->size = size; |
| 97 | +} |
91 | 98 |
|
92 | | -/* |
93 | | - * ClockSweepTick - Helper routine for StrategyGetBuffer() |
94 | | - * |
95 | | - * Move the clock hand one buffer ahead of its current position and return the |
96 | | - * id of the buffer now under the hand. |
97 | | - */ |
| 99 | +/* Return the number of complete passes the clock hand has made */ |
98 | 100 | static inline uint32 |
99 | | -ClockSweepTick(void) |
| 101 | +ClockSweepCycles(ClockSweep *sweep) |
100 | 102 | { |
101 | | - uint64 hand = UINT64_MAX; |
102 | | - uint32 victim; |
| 103 | + uint64 current = pg_atomic_read_u64(&sweep->counter); |
103 | 104 |
|
104 | | - /* |
105 | | - * Atomically move hand ahead one buffer - if there's several processes |
106 | | - * doing this, this can lead to buffers being returned slightly out of |
107 | | - * apparent order. |
108 | | - */ |
109 | | - hand = pg_atomic_fetch_add_u64(&StrategyControl->nextVictimBuffer, 1); |
| 105 | + return (uint32) (current / sweep->size); |
| 106 | +} |
| 107 | + |
| 108 | +/* Return the current position of the clock's hand modulo size */ |
| 109 | +static inline uint32 |
| 110 | +ClockSweepPosition(ClockSweep *sweep) |
| 111 | +{ |
| 112 | + uint64 counter = pg_atomic_read_u64(&sweep->counter); |
| 113 | + |
| 114 | + return (uint32) (counter % sweep->size); |
| 115 | +} |
110 | 116 |
|
111 | | - victim = CLOCK_HAND_POSITION(hand); |
112 | | - Assert(victim < NBuffers); |
| 117 | +/* |
| 118 | + * Advance the clock hand one buffer and return the index of the buffer now under it. |
| 119 | + */ |
| 120 | +static inline uint32 |
| 121 | +ClockSweepTick(ClockSweep *sweep) |
| 122 | +{ |
| 123 | + uint64 counter = pg_atomic_fetch_add_u64(&sweep->counter, 1); |
113 | 124 |
|
114 | | - return victim; |
| 125 | + return (uint32) (counter % sweep->size); |
115 | 126 | } |
116 | 127 |
|
117 | 128 | /* |
@@ -181,11 +192,11 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r |
181 | 192 | */ |
182 | 193 | pg_atomic_fetch_add_u32(&StrategyControl->numBufferAllocs, 1); |
183 | 194 |
|
184 | | - /* Use the "clock-sweep" algorithm to find a free buffer */ |
| 195 | + /* Use the clock-sweep algorithm to find a free buffer */ |
185 | 196 | trycounter = NBuffers; |
186 | 197 | for (;;) |
187 | 198 | { |
188 | | - buf = GetBufferDescriptor(ClockSweepTick()); |
| 199 | + buf = GetBufferDescriptor(ClockSweepTick(&StrategyControl->clock)); |
189 | 200 |
|
190 | 201 | /* |
191 | 202 | * If the buffer is pinned or has a nonzero usage_count, we cannot use |
@@ -236,19 +247,14 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r |
236 | 247 | * buffer allocs if non-NULL pointers are passed. The alloc count is reset |
237 | 248 | * after being read. |
238 | 249 | */ |
239 | | -uint32 StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc) { |
240 | | - uint64 counter = UINT64_MAX; uint32 result; |
241 | | - |
242 | | - counter = pg_atomic_read_u64(&StrategyControl->nextVictimBuffer); |
243 | | - result = CLOCK_HAND_POSITION(counter); |
| 250 | +uint32 |
| 251 | +StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc) |
| 252 | +{ |
| 253 | + uint32 result = ClockSweepPosition(&StrategyControl->clock); |
244 | 254 |
|
245 | 255 | if (complete_passes) |
246 | 256 | { |
247 | | - /* |
248 | | - * The number of complete passes is the counter divided by NBuffers |
249 | | - * because the clock hand is a 64-bit counter that only increases. |
250 | | - */ |
251 | | - *complete_passes = (uint32) (counter / NBuffers); |
| 257 | + *complete_passes = ClockSweepCycles(&StrategyControl->clock); |
252 | 258 | } |
253 | 259 |
|
254 | 260 | if (num_buf_alloc) |
@@ -335,8 +341,8 @@ StrategyInitialize(bool init) |
335 | 341 | */ |
336 | 342 | Assert(init); |
337 | 343 |
|
338 | | - /* Initialize combined clock-sweep pointer/complete passes counter */ |
339 | | - pg_atomic_init_u64(&StrategyControl->nextVictimBuffer, 0); |
| 344 | + /* Initialize the clock-sweep algorithm */ |
| 345 | + ClockSweepInit(&StrategyControl->clock, NBuffers); |
340 | 346 |
|
341 | 347 | /* Clear statistics */ |
342 | 348 | pg_atomic_init_u32(&StrategyControl->numBufferAllocs, 0); |
|
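For reference, below is a minimal standalone sketch of the arithmetic the new ClockSweep helpers rely on: a single 64-bit counter that only ever grows, where counter % size gives the hand's current position and counter / size gives the number of complete passes over the pool. It uses C11 atomics rather than PostgreSQL's pg_atomic API, and the Clock, clock_tick, and clock_cycles names are invented for illustration, not taken from the patch.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the patch's ClockSweep struct. */
typedef struct
{
    atomic_uint_fast64_t counter;   /* monotonically increasing tick count */
    uint32_t    size;               /* number of buffers on the clock */
} Clock;

/* Advance the hand one tick; the pre-increment value names the buffer now under it. */
static uint32_t
clock_tick(Clock *c)
{
    uint64_t    old = atomic_fetch_add(&c->counter, 1);

    return (uint32_t) (old % c->size);
}

/* Number of complete passes the hand has made over the pool so far. */
static uint32_t
clock_cycles(Clock *c)
{
    return (uint32_t) (atomic_load(&c->counter) / c->size);
}

int
main(void)
{
    Clock       c;

    atomic_init(&c.counter, 0);
    c.size = 4;

    /* Ten ticks over four buffers: positions cycle 0..3, complete passes reach 2. */
    for (int i = 0; i < 10; i++)
        printf("tick -> buffer %u\n", clock_tick(&c));

    printf("complete passes: %u\n", clock_cycles(&c));
    return 0;
}

Because the counter is never masked or reset, the position and the pass count stay consistent with each other, which is what StrategySyncStart's callers (e.g. the bgwriter) depend on.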