Skip to content

Commit c6be54e

Browse files
Masahiko Sawada authored and Commitfest Bot committed
Add more parallel vacuum tests.
1 parent 964ce65 commit c6be54e

File tree

3 files changed

+135
-5
lines changed

3 files changed

+135
-5
lines changed

src/backend/access/heap/vacuumlazy.c

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,7 @@
192192
#include "storage/freespace.h"
193193
#include "storage/lmgr.h"
194194
#include "storage/read_stream.h"
195+
#include "utils/injection_point.h"
195196
#include "utils/lsyscache.h"
196197
#include "utils/pg_rusage.h"
197198
#include "utils/timestamp.h"
@@ -466,6 +467,14 @@ typedef struct ParallelLVLeader
466467
/* The number of workers launched for parallel lazy heap scan */
467468
int nworkers_launched;
468469

470+
/*
471+
* Will the leader participate in parallel lazy heap scan?
472+
*
473+
* This is a parameter for testing and always true unless it is disabled
474+
* explicitly by the injection point.
475+
*/
476+
bool leaderparticipate;
477+
469478
/*
470479
* These fields point to the arrays of all per-worker scan states stored
471480
* in DSM.
@@ -2251,7 +2260,8 @@ do_parallel_lazy_scan_heap(LVRelState *vacrel)
22512260
* retrieving new blocks for the read stream once the space of
22522261
* dead_items TIDs exceeds the limit.
22532262
*/
2254-
do_lazy_scan_heap(vacrel, false);
2263+
if (vacrel->leader->leaderparticipate)
2264+
do_lazy_scan_heap(vacrel, false);
22552265

22562266
/* Wait for parallel workers to finish and gather scan results */
22572267
parallel_lazy_scan_heap_end(vacrel);
@@ -4543,6 +4553,7 @@ heap_parallel_vacuum_estimate(Relation rel, ParallelContext *pcxt, int nworkers,
45434553
{
45444554
LVRelState *vacrel = (LVRelState *) state;
45454555
Size size = 0;
4556+
bool leaderparticipate = true;
45464557

45474558
vacrel->leader = palloc(sizeof(ParallelLVLeader));
45484559

@@ -4567,6 +4578,12 @@ heap_parallel_vacuum_estimate(Relation rel, ParallelContext *pcxt, int nworkers,
45674578
vacrel->leader->scandata_len = mul_size(sizeof(LVScanData), nworkers);
45684579
shm_toc_estimate_chunk(&pcxt->estimator, vacrel->leader->scandata_len);
45694580
shm_toc_estimate_keys(&pcxt->estimator, 1);
4581+
4582+
#ifdef USE_INJECTION_POINTS
4583+
if (IS_INJECTION_POINT_ATTACHED("parallel-heap-vacuum-disable-leader-participation"))
4584+
leaderparticipate = false;
4585+
#endif
4586+
vacrel->leader->leaderparticipate = leaderparticipate;
45704587
}
45714588

45724589
/*
@@ -4604,7 +4621,8 @@ heap_parallel_vacuum_initialize(Relation rel, ParallelContext *pcxt, int nworker
46044621

46054622
/* including the leader too */
46064623
shared->eager_scan_remaining_successes_per_worker =
4607-
vacrel->eager_scan_remaining_successes / (nworkers + 1);
4624+
vacrel->eager_scan_remaining_successes /
4625+
(vacrel->leader->leaderparticipate ? nworkers + 1 : nworkers);
46084626

46094627
shm_toc_insert(pcxt->toc, PARALLEL_LV_KEY_SHARED, shared);
46104628
vacrel->plvstate->shared = shared;

src/backend/commands/vacuumparallel.c

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
#include "pgstat.h"
4040
#include "storage/bufmgr.h"
4141
#include "tcop/tcopprot.h"
42+
#include "utils/injection_point.h"
4243
#include "utils/lsyscache.h"
4344
#include "utils/rel.h"
4445

@@ -1035,14 +1036,28 @@ parallel_vacuum_index_is_parallel_safe(Relation indrel, int num_index_scans,
10351036
int
10361037
parallel_vacuum_collect_dead_items_begin(ParallelVacuumState *pvs)
10371038
{
1039+
int nworkers = pvs->nworkers_for_table;
1040+
#ifdef USE_INJECTION_POINTS
1041+
static int ntimes = 0;
1042+
#endif
1043+
10381044
Assert(!IsParallelWorker());
10391045

1040-
if (pvs->nworkers_for_table == 0)
1046+
if (nworkers == 0)
10411047
return 0;
10421048

10431049
/* Start parallel vacuum workers for collecting dead items */
1044-
Assert(pvs->nworkers_for_table <= pvs->pcxt->nworkers);
1045-
parallel_vacuum_begin_work_phase(pvs, pvs->nworkers_for_table,
1050+
Assert(nworkers <= pvs->pcxt->nworkers);
1051+
1052+
#ifdef USE_INJECTION_POINTS
1053+
if (IS_INJECTION_POINT_ATTACHED("parallel-vacuum-ramp-down-workers"))
1054+
{
1055+
nworkers = pvs->nworkers_for_table - Min(ntimes, pvs->nworkers_for_table);
1056+
ntimes++;
1057+
}
1058+
#endif
1059+
1060+
parallel_vacuum_begin_work_phase(pvs, nworkers,
10461061
PV_WORK_PHASE_COLLECT_DEAD_ITEMS);
10471062

10481063
/* Include the worker count for the leader itself */
Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
# Copyright (c) 2025, PostgreSQL Global Development Group

# Tests for parallel heap vacuum.
#
# Uses injection points to disable the leader's participation in the
# parallel heap scan and to ramp down the number of parallel workers
# across vacuum passes, then verifies that relfrozenxid is still
# advanced correctly.

use strict;
use warnings FATAL => 'all';
use locale;

use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;

# This test relies on injection points, which are available only when
# the server was built with injection points enabled.  Guard against an
# unset environment variable: 'ne' on undef would be fatal under
# 'warnings FATAL => all'.
if (($ENV{enable_injection_points} // '') ne 'yes')
{
	plan skip_all => 'Injection points not supported by this build';
}

my $node = PostgreSQL::Test::Cluster->new('master');
$node->init;
$node->start;
$node->safe_psql('postgres', qq[create extension injection_points;]);

$node->safe_psql('postgres', qq[
create table t (i int) with (autovacuum_enabled = off);
create index on t (i);
]);

# Split the $nrows insertions into two batches at a random point so that
# the newer-XID tuples injected between them land somewhere in the middle
# of the table.
my $nrows = 1_000_000;
my $first = int($nrows * rand());

my $psql = $node->background_psql('postgres', on_error_stop => 0);

# Begin the transaction that holds xmin.
$psql->query_safe('begin; select pg_current_xact_id();');

# Consume some XIDs so the tuples injected below get a visibly newer XID.
$node->safe_psql('postgres', qq[
select pg_current_xact_id();
select pg_current_xact_id();
select pg_current_xact_id();
select pg_current_xact_id();
select pg_current_xact_id();
]);

# While inserting $nrows tuples into the table with an older XID,
# we inject some tuples with a newer XID filling one page somewhere
# in the table.

# Insert the first part of rows.
$psql->query_safe(qq[insert into t select generate_series(1, $first);]);

# Insert some rows with a newer XID, which need to fill at least
# one page to prevent the page from being frozen in the following
# vacuum.
my $xid = $node->safe_psql('postgres', qq[
begin;
insert into t select 0 from generate_series(1, 300);
select pg_current_xact_id()::xid;
commit;
]);

# Insert the remaining rows and commit.  Start at $first + 1 so the two
# batches together insert exactly $nrows distinct values.
$psql->query_safe(qq[insert into t select generate_series($first + 1, $nrows);]);
$psql->query_safe(qq[commit;]);

# Delete some rows so the vacuum below has dead items to collect.
$node->safe_psql('postgres', qq[delete from t where i between 1 and 20000;]);

# Execute parallel vacuum that freezes all rows except for the tuples
# inserted with the newer XID.  We should update the relfrozenxid up to
# that XID.  Setting a lower value to maintenance_work_mem invokes
# multiple rounds of heap scanning and the number of parallel workers
# will ramp down thanks to the injection points.
$node->safe_psql('postgres', qq[
set vacuum_freeze_min_age to 5;
set max_parallel_maintenance_workers TO 5;
set maintenance_work_mem TO 256;
select injection_points_set_local();
select injection_points_attach('parallel-vacuum-ramp-down-workers', 'notice');
select injection_points_attach('parallel-heap-vacuum-disable-leader-participation', 'notice');
vacuum (parallel 5, verbose) t;
]);

is( $node->safe_psql('postgres', qq[select relfrozenxid from pg_class where relname = 't';]),
	"$xid", "relfrozenxid is updated as expected");

# Check if we have successfully frozen the table in the previous
# vacuum by scanning all tuples.
$node->safe_psql('postgres', qq[vacuum (freeze, parallel 0, verbose, disable_page_skipping) t;]);
is( $node->safe_psql('postgres', qq[select $xid < relfrozenxid::text::int from pg_class where relname = 't';]),
	"t", "all rows are frozen");

$node->stop;
done_testing();

0 commit comments

Comments
 (0)