diff --git a/src/test/Makefile b/src/test/Makefile index b7cddc8..7174c2d 100644 --- a/src/test/Makefile +++ b/src/test/Makefile @@ -16,7 +16,7 @@ SUBDIRS = regress isolation modules # The SSL suite is not secure to run on a multi-user system, so don't run # it as part of global "check" target. -ALWAYS_SUBDIRS = ssl +ALWAYS_SUBDIRS = recovery ssl # We want to recurse to all subdirs for all standard targets, except that # installcheck and install should not recurse into the subdirectory "modules". diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index 003cd9a..a035472 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -20,6 +20,7 @@ our @EXPORT = qw( program_version_ok program_options_handling_ok command_like + command_is issues_sql_like ); @@ -200,6 +201,16 @@ sub command_like like($stdout, $expected_stdout, "$test_name: matches"); } +sub command_is +{ + my ($cmd, $expected_stdout, $test_name) = @_; + my ($stdout, $stderr); + my $result = run $cmd, '>', \$stdout, '2>', \$stderr; + ok($result, "@$cmd exit code 0"); + is($stderr, '', "@$cmd no stderr"); + is($stdout, $expected_stdout, "$test_name: matches"); +} + sub issues_sql_like { my ($cmd, $expected_sql, $test_name) = @_; diff --git a/src/test/recovery/.gitignore b/src/test/recovery/.gitignore new file mode 100644 index 0000000..499fa7d --- /dev/null +++ b/src/test/recovery/.gitignore @@ -0,0 +1,3 @@ +# Generated by test suite +/regress_log/ +/tmp_check/ diff --git a/src/test/recovery/Makefile b/src/test/recovery/Makefile new file mode 100644 index 0000000..16c063a --- /dev/null +++ b/src/test/recovery/Makefile @@ -0,0 +1,17 @@ +#------------------------------------------------------------------------- +# +# Makefile for src/test/recovery +# +# Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/test/recovery/Makefile +# 
+#-------------------------------------------------------------------------
+
+subdir = src/test/recovery
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+check:
+	$(prove_check)
diff --git a/src/test/recovery/README b/src/test/recovery/README
new file mode 100644
index 0000000..c194297
--- /dev/null
+++ b/src/test/recovery/README
@@ -0,0 +1,19 @@
+src/test/recovery/README
+
+Regression tests for recovery and replication
+=============================================
+
+This directory contains a test suite for recovery and replication,
+testing mainly the interactions of recovery.conf with cluster
+instances by providing a simple set of routines that can be used
+to define a custom cluster for a test, including backup, archiving,
+and streaming configuration.
+
+Running the tests
+=================
+
+    make check
+
+NOTE: This creates a temporary installation, and some tests may
+create one or multiple nodes, be they master or standby, for the
+purpose of the tests.
diff --git a/src/test/recovery/RecoveryTest.pm b/src/test/recovery/RecoveryTest.pm
new file mode 100644
index 0000000..f401418
--- /dev/null
+++ b/src/test/recovery/RecoveryTest.pm
@@ -0,0 +1,345 @@
+package RecoveryTest;
+
+# Set of common routines for recovery regression tests for a PostgreSQL
+# cluster. This includes global variables and methods that can be used
+# by the various sets of tests present to set up cluster nodes and
+# configure them according to the test scenario.
+#
+# Cluster nodes can be freely created using initdb or using the existing
+# base backup of another node, with minimum configuration done when the
+# node is created for the first time, such as having a proper port number.
+# It is then up to the test to decide what to do with the newly-created
+# node, for flexibility.
+#
+# Data folders of each node are available through the global variables
+# provided by this package, hashed depending on the port number:
+# - connstr_nodes to query the connection string of a given node
+# - datadir_nodes to get the data folder of a given node
+# - archive_nodes for the location of the WAL archives
+# - backup_nodes for the location of base backups
+#
+# Nodes are identified by their port number, which should be unique
+# for each node of the cluster as it is run locally.
+
+use Cwd;
+use TestLib;
+use Test::More;
+
+use Archive::Tar;
+use File::Copy;
+use File::Path qw(remove_tree);
+
+# TODO: Should try to remove this dependency
+use IPC::Run qw(run start);
+
+use Exporter 'import';
+
+our @EXPORT = qw(
+	%connstr_nodes
+	%datadir_nodes
+	%backup_nodes
+	%archive_nodes
+
+	append_to_file
+
+	backup_node
+	enable_archiving
+	get_free_port
+	init_node
+	init_node_from_backup
+	init_recovery_test
+	make_archiving_standby
+	make_streaming_standby
+	pgctl_node
+	psql_get
+	psql_node
+);
+
+# Global variables for node data, keyed by port number.  Note that
+# hashes are initialized with (), not {}: assigning a hash reference
+# to a hash would create a single stringified-ref key and warn.
+%datadir_nodes = ();	# PGDATA folders
+%backup_nodes = ();		# Backup base folder
+%archive_nodes = ();	# Archive base folder
+%connstr_nodes = ();	# Connection strings
+
+# Adjust these paths for your environment
+my $testroot = "./tmp_check";
+
+# Location of log files
+my $logdir = "./regress_log";
+mkdir $logdir;
+my $log_path;
+
+$ENV{PGDATABASE} = "postgres";
+
+# Sanity check for a port number: it must be a positive integer in the
+# valid TCP port range.  Dies when the check fails.
+sub is_correct_port
+{
+	my $port = shift;
+
+	# Note: "!~" here, the routine must die when the value is NOT an
+	# integer.
+	if ( $port !~ m/^\d+$/ )
+	{
+		die "Port number specified is not an integer";
+	}
+
+	# Maximum valid TCP port number is 65535.
+	if ($port > 65535)
+	{
+		die "Port number specified higher than 65535";
+	}
+}
+
+# init_recovery_test
+# Routine to call at the beginning of a test
+sub init_recovery_test
+{
+	my $testname = shift;
+
+	$log_path = "regress_log/recovery_log_${testname}.log";
+
+	remove_tree $log_path;
+}
+
+# Handy routine to report a message in the test's log file
+sub report_log
+{
+	my $msg = shift;
+
+	system_or_bail("echo '$msg' >> $log_path 2>&1 ");
+}
+ +# Set of handy routines able to set up a node with different characteristics +# Enable streaming replication +sub enable_streaming +{ + my $port_root = shift; # Instance to link to + my $port_standby = shift; + + append_to_file("$datadir_nodes{ $port_standby }/recovery.conf", qq( +primary_conninfo='$connstr_nodes{ $port_root }' +standby_mode=on +recovery_target_timeline='latest' +)); +} + +# Enable the use of restore_command from a node +sub enable_restoring +{ + my $port_root = shift; # Instance to link to + my $port_standby = shift; + + append_to_file("$datadir_nodes{ $port_standby }/recovery.conf", qq( +restore_command='cp -i $archive_nodes{ $port_root }/%f %p' +standby_mode=on +recovery_target_timeline='latest' +)); +} + +# Enable WAL archiving on a node +sub enable_archiving +{ + my $port = shift; + + is_correct_port($port); + + # Enable archive_mode and archive_command on node + append_to_file("$datadir_nodes{ $port }/postgresql.conf", qq( +archive_mode = on +archive_command = 'cp -i %p $archive_nodes{ $port }/%f' +)); +} + +# Standby node initialization +# Node only streaming. 
+sub make_streaming_standby +{ + my $port_master = shift; + my $backup_name = shift; + my $port_standby = get_free_port(); + init_node_from_backup($port_standby, $port_master, $backup_name); + + # Start second node, streaming from first one + enable_streaming($port_master, $port_standby); + pgctl_node($port_standby, 'start'); + + return $port_standby; +} +# Node getting only from archives +sub make_archiving_standby +{ + my $port_master = shift; + my $backup_name = shift; + my $port_standby = get_free_port(); + init_node_from_backup($port_standby, $port_master, $backup_name); + + # Start second node, streaming from first one + enable_restoring($port_master, $port_standby); + pgctl_node($port_standby, 'start'); + + return $port_standby; +} + +sub configure_base_node +{ + my $port = shift; + + is_correct_port($port); + + # Make configuration somewhat generic to test recovery + append_to_file("$datadir_nodes{ $port }/postgresql.conf", qq( +port = $port +wal_level = hot_standby +max_wal_senders = 5 +wal_keep_segments = 20 +max_wal_size = 128MB +shared_buffers = 1MB +wal_log_hints = on +hot_standby = on +autovacuum = off +max_connections = 10 +wal_retrieve_retry_interval = '100ms' +)); + + # Accept replication connections + append_to_file("$datadir_nodes{ $port }/pg_hba.conf", qq( +local replication all trust +)); +} + +# Get a port number not in use currently for a new node +# As port number retrieval is based on the nodes currently running, +# be sure that the node that is consuming this port number has already +# been started. +sub get_free_port +{ + my $found = 0; + # XXX: Should this part use PG_VERSION_NUM? 
+	my $port = 90400 % 16384 + 49152;
+
+	while ($found == 0)
+	{
+		$port++;
+
+		# Probe the port with a plain connection attempt: psql exits
+		# with a non-zero code when no server listens on this port.
+		# Redirect stdin/stdout/stderr so the probe is silent and
+		# non-interactive.
+		my $ret = system("psql -X -p $port postgres </dev/null >/dev/null 2>&1");
+		if ($ret != 0)
+		{
+			$found = 1;
+		}
+	}
+
+	report_log("Found free port $port");
+	return $port;
+}
+
+# Low-level routines to initialize a node
+# Initialize a node from scratch
+sub init_node
+{
+	my $port = shift;
+
+	is_correct_port($port);
+
+	# Save configuration information
+	$datadir_nodes{ $port } = TestLib::tempdir;
+	$backup_nodes{ $port } = TestLib::tempdir;
+	$connstr_nodes{ $port } = "port=$port";
+	$archive_nodes{ $port } = TestLib::tempdir;
+
+	standard_initdb($datadir_nodes{ $port });
+	configure_base_node($port);
+}
+
+# Initialize a node from an existing base backup
+sub init_node_from_backup
+{
+	my ($port, $root_port, $backup_name) = @_;
+
+	is_correct_port($port);
+	is_correct_port($root_port);
+
+	my $backup_path = "$backup_nodes{ $root_port }/$backup_name";
+	my $backup_file = "$backup_path/base.tar";
+
+	# Check existence of backup wanted
+	if (! -d $backup_path)
+	{
+		die "Backup $backup_path does not exist";
+	}
+
+	# Save configuration information
+	$datadir_nodes{ $port } = TestLib::tempdir;
+	$backup_nodes{ $port } = TestLib::tempdir;
+	$connstr_nodes{ $port } = "port=$port";
+	$archive_nodes{ $port } = TestLib::tempdir;
+
+	# Extract the base backup wanted
+	my $current_dir = cwd();
+
+	# Temporarily move to the place of extraction
+	chdir "$datadir_nodes{ $port }";
+	Archive::Tar->extract_archive($backup_file);
+	chdir "$current_dir";
+	configure_base_node($port);
+}
+
+# Create a backup on a node already running
+sub backup_node
+{
+	my ($port, $backup_name) = @_;
+
+	is_correct_port($port);
+
+	my $backup_path = "$backup_nodes{ $port }/$backup_name";
+
+	# Backup a node in tar format, it is more portable across platforms
+	system_or_bail("pg_basebackup -D $backup_path -p $port --format=t -x >> $log_path 2>&1");
+}
+
+# Run simple SQL query on a node
+sub psql_node
+{
+	my ($port, $query) = @_;
+
+	is_correct_port($port);
+
+	report_log("Running \"$query\" on node with port $port");
+	system_or_bail("psql -q --no-psqlrc -d $connstr_nodes{ $port } -c \"$query\" >> $log_path 2>&1");
+}
+
+# Run a query on a node and fetch back its result.  This routine is
+# useful when needing server-side state data, like data to define
+# a recovery target for example, that depend on the environment
+# when this test suite is run.
+sub psql_get
+{
+	my ($port, $query) = @_;
+	my ($stdout, $stderr);
+
+	is_correct_port($port);
+
+	report_log("Running \"$query\" on node with port $port to get back results");
+
+	# Pass each option and its value as separate arguments so psql
+	# parses them reliably, and fail hard if the query errors out
+	# instead of silently returning an empty string.
+	my $result = run [ 'psql', '-A', '-t', '-q', '--no-psqlrc',
+					   '-d', $connstr_nodes{ $port },
+					   '-c', $query ],
+					 '>', \$stdout, '2>', \$stderr;
+	die "query \"$query\" failed: $stderr" unless $result;
+	chomp($stdout);
+	return $stdout;
+}
+
+# Perform an action with pg_ctl on a node.  Additional arguments, such
+# as '-m', 'immediate' for a stop, are appended to the pg_ctl command
+# line instead of being silently ignored.
+sub pgctl_node
+{
+	my $port = shift;
+	my $action = shift;
+	my @extra_args = @_;
+
+	is_correct_port($port);
+
+	report_log("Running \"$action\" on node with port $port");
+	system_or_bail("pg_ctl $action -w -D $datadir_nodes{ $port } @extra_args >> $log_path 2>&1");
+}
+
+# Add a set of parameters to a configuration file
+sub append_to_file
+{
+	my ($filename, $str) = @_;
+
+	open my $fh, ">>", $filename
+	  or die "could not open file $filename: $!";
+	print $fh $str;
+	close $fh;
+}
+
+1;
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
new file mode 100644
index 0000000..5d6ebeb
--- /dev/null
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -0,0 +1,59 @@
+# Minimal test testing streaming replication
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 8;
+
+use RecoveryTest;
+
+RecoveryTest::init_recovery_test('stream_rep');
+
+# Initialize master node
+my $port_master = get_free_port();
+my $backup_name = 'my_backup';
+init_node($port_master);
+
+# Start it
+pgctl_node($port_master, 'start');
+
+# Take backup
+backup_node($port_master, $backup_name);
+
+# Create streaming standby linking to master
+my $port_standby_1 = make_streaming_standby($port_master, $backup_name);
+
+# Take backup of standby 1 (not mandatory, but useful to check if
+# pg_basebackup works on a standby).
+backup_node($port_standby_1, $backup_name); + +# Create second standby node linking to standby 1 +my $port_standby_2 = make_streaming_standby($port_standby_1, $backup_name); + +# Create some content on master and check its presence in standby 1 and 2 +psql_node($port_master, "CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a"); + +# Sleep a bit to have time to replay things +sleep 1; +command_is(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby_1 }", '-c', "SELECT count(*) FROM tab_int"], + qq(1002 +), + 'check streamed content'); +command_is(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby_2 }", '-c', "SELECT count(*) FROM tab_int"], + qq(1002 +), + 'check streamed content'); + +# Check that only READ-only queries can run on standbys +command_fails(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby_1 }", '-c', "INSERT INTO tab_int VALUES (1)"], + 'Read-only queries on standby 1'); +command_fails(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby_2 }", '-c', "INSERT INTO tab_int VALUES (1)"], + 'Read-only queries on standby 2'); + +# Stop nodes +pgctl_node($port_master, 'stop', '-m', 'immediate'); +pgctl_node($port_standby_1, 'stop', '-m', 'immediate'); +pgctl_node($port_standby_2, 'stop', '-m', 'immediate'); diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl new file mode 100644 index 0000000..a76ed65 --- /dev/null +++ b/src/test/recovery/t/002_archiving.pl @@ -0,0 +1,47 @@ +# test for archiving with warm standby +use strict; +use warnings; +use TestLib; +use Test::More tests => 3; +use File::Copy; +use RecoveryTest; + +RecoveryTest::init_recovery_test('archiving'); + +# Initialize master node, doing archives +my $port_master = get_free_port(); +my $backup_name = 'my_backup'; +init_node($port_master); +enable_archiving($port_master); + +# Start it +pgctl_node($port_master, 'start'); + +# Take backup for slave 
+backup_node($port_master, 'my_backup'); + +# Initialize standby node from backup, fetching WAL from archives +my $port_standby = make_archiving_standby($port_master, $backup_name); + +# Create some content on master +psql_node($port_master, + "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"); + +# Force archiving of WAL file to make it present on master +psql_node($port_master, "SELECT pg_switch_xlog()"); + +# Add some more content, it should not be present on standby +psql_node($port_master, + "INSERT INTO tab_int VALUES (generate_series(1001,2000))"); + +# Sleep a bit to have time to replay things +sleep 2; +command_is(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby }", '-c', "SELECT count(*) FROM tab_int"], + qq(1000 +), + 'check content from archives'); + +# Stop nodes +pgctl_node($port_master, 'stop', '-m', 'immediate'); +pgctl_node($port_standby, 'stop', '-m', 'immediate'); diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl new file mode 100644 index 0000000..e725f35 --- /dev/null +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -0,0 +1,134 @@ +# Test for recovery targets: name, timestamp, XID +use strict; +use warnings; +use TestLib; +use Test::More tests => 21; + +use RecoveryTest; + +RecoveryTest::init_recovery_test('recovery_targets'); + +# Create and test a standby from given backup, with a certain +# recovery target. 
+sub test_recovery_standby +{ + my $test_name = shift; + my $port_master = shift; + my $recovery_params = shift; + my $num_rows = shift; + + my $port_standby = get_free_port(); + + init_node_from_backup($port_standby, $port_master, 'my_backup'); + append_to_file("$datadir_nodes{ $port_standby }/recovery.conf", qq( +restore_command='cp -i $archive_nodes{ $port_master }/%f %p' +)); + + foreach my $param_item (@$recovery_params) + { + append_to_file("$datadir_nodes{ $port_standby }/recovery.conf", + qq($param_item +)); + } + + pgctl_node($port_standby, 'start'); + + # Sleep a bit to have time to replay things + sleep 1; + + # Create some content on master and check its presence in standby + command_is(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby }", '-c', + "SELECT count(*) FROM tab_int"], + qq($num_rows +), + "check standby content for $test_name"); + + # Stop standby nodes + pgctl_node($port_standby, 'stop', '-m', 'immediate'); +} + +# Initialize master node +my $port_master = get_free_port(); +init_node($port_master); +enable_archiving($port_master); + +# Start it +pgctl_node($port_master, 'start'); + +# Create data before taking the backup, aimed at testing +# recovery_target = 'immediate' +psql_node($port_master, + "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"); + +# Take backup from which all operations will be run +backup_node($port_master, 'my_backup'); + +# Insert some data with used as a replay reference, with a recovery +# target TXID. 
+psql_node($port_master, + "INSERT INTO tab_int VALUES (generate_series(1001,2000))"); +my $recovery_txid = psql_get($port_master, "SELECT txid_current()"); + +# More data, with recovery target timestamp +psql_node($port_master, + "INSERT INTO tab_int VALUES (generate_series(2001,3000))"); +my $recovery_time = psql_get($port_master, "SELECT now()"); + +# Even more data, this time with a recovery target name +psql_node($port_master, + "INSERT INTO tab_int VALUES (generate_series(3001,4000))"); +my $recovery_name = "my_target"; +psql_node($port_master, + "SELECT pg_create_restore_point('$recovery_name')"); + +# Force archiving of WAL file +psql_node($port_master, "SELECT pg_switch_xlog()"); + +# Test recovery targets +my @recovery_params = ( "recovery_target = 'immediate'" ); +test_recovery_standby('immediate target', $port_master, + \@recovery_params, + "1000"); +@recovery_params = ( "recovery_target_xid = '$recovery_txid'" ); +test_recovery_standby('XID', $port_master, + \@recovery_params, + "2000"); +@recovery_params = ( "recovery_target_time = '$recovery_time'" ); +test_recovery_standby('Time', $port_master, + \@recovery_params, + "3000"); +@recovery_params = ( "recovery_target_name = '$recovery_name'" ); +test_recovery_standby('Name', $port_master, + \@recovery_params, + "4000"); + +# Multiple targets +# Last entry has priority (note that an array respects the order of items +# not hashes). 
+@recovery_params = (
+	"recovery_target_name = '$recovery_name'",
+	"recovery_target_xid = '$recovery_txid'",
+	"recovery_target_time = '$recovery_time'"
+);
+test_recovery_standby('Name + XID + Time', $port_master,
+					  \@recovery_params,
+					  "3000");
+@recovery_params = (
+	"recovery_target_time = '$recovery_time'",
+	"recovery_target_name = '$recovery_name'",
+	"recovery_target_xid = '$recovery_txid'"
+);
+test_recovery_standby('Time + Name + XID', $port_master,
+					  \@recovery_params,
+					  "2000");
+@recovery_params = (
+	"recovery_target_xid = '$recovery_txid'",
+	"recovery_target_time = '$recovery_time'",
+	"recovery_target_name = '$recovery_name'"
+);
+test_recovery_standby('XID + Time + Name', $port_master,
+					  \@recovery_params,
+					  "4000");
+
+pgctl_node($port_master, 'stop', '-m', 'immediate');
diff --git a/src/test/recovery/t/004_timeline_switch.pl b/src/test/recovery/t/004_timeline_switch.pl
new file mode 100644
index 0000000..1c05e9e
--- /dev/null
+++ b/src/test/recovery/t/004_timeline_switch.pl
@@ -0,0 +1,65 @@
+# Tests for timeline switch
+# Ensure that a standby is able to follow a newly-promoted standby
+# on a new timeline.
+use strict; +use warnings; +use File::Path qw(remove_tree); +use TestLib; +use Test::More tests => 3; + +use RecoveryTest; + +RecoveryTest::init_recovery_test('timeline_switch'); + +# Initialize master node +my $port_master = get_free_port(); +init_node($port_master); + +# Start it +pgctl_node($port_master, 'start'); + +# Take backup +my $backup_name = 'my_backup'; +backup_node($port_master, $backup_name); + +# Create two standbys linking to it +my $port_standby_1 = make_streaming_standby($port_master, $backup_name); +diag("Started standby 1 with port $port_standby_1"); +my $port_standby_2 = make_streaming_standby($port_master, $backup_name); +diag("Started standby 2 with port $port_standby_2"); + +# Create some content on master +psql_node($port_master, "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"); + +# Sleep a bit to have time to replay things +sleep 1; + +# Stop master, and promote standby 1, switching it to a new timeline +pgctl_node($port_master, 'stop', '-m', 'immediate'); +pgctl_node($port_standby_1, 'promote'); +diag("Promoted standby 1"); + +# Switch standby 2 to replay from standby 1 +remove_tree("$datadir_nodes{ $port_standby_2 }/recovery.conf"); +append_to_file("$datadir_nodes{ $port_standby_2 }/recovery.conf", qq( +primary_conninfo='$connstr_nodes{ $port_standby_1 }' +standby_mode=on +recovery_target_timeline='latest' +)); +pgctl_node($port_standby_2, "restart"); +sleep 1; + +# Insert some data in standby 1 and check its presence in standby 2 +# to ensure that the timeline switch has been done. 
+psql_node($port_standby_1, + "INSERT INTO tab_int VALUES (generate_series(1001,2000))"); +sleep 1; +command_is(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby_2 }", '-c', + "SELECT count(*) FROM tab_int"], qq(2000 +), + 'check content of standby 2'); + +# Stop nodes +pgctl_node($port_standby_1, 'stop', '-m', 'immediate'); +pgctl_node($port_standby_2, 'stop', '-m', 'immediate'); diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl new file mode 100644 index 0000000..56bb090 --- /dev/null +++ b/src/test/recovery/t/005_replay_delay.pl @@ -0,0 +1,58 @@ +# Checks for recovery_min_apply_delay +use strict; +use warnings; +use TestLib; +use Test::More tests => 6; + +use RecoveryTest; + +RecoveryTest::init_recovery_test('replay_delay'); + +# Initialize master node +my $port_master = get_free_port(); +init_node($port_master); + +# Start it +pgctl_node($port_master, 'start'); + +# And some content +psql_node($port_master, "CREATE TABLE tab_int AS SELECT generate_series(1,10) AS a"); + +# Take backup +backup_node($port_master, 'my_backup'); + +# Initialize node from backup +my $port_standby = get_free_port(); +init_node_from_backup($port_standby, $port_master, 'my_backup'); + +# Start second node, streaming from first one +append_to_file("$datadir_nodes{ $port_standby }/recovery.conf", qq( +primary_conninfo='$connstr_nodes{ $port_master }' +recovery_min_apply_delay = '2s' +standby_mode=on +recovery_target_timeline='latest' +)); +pgctl_node($port_standby, 'start'); + +# Make new content on master and check its presence in standby +# depending on the delay of 2s applied above +psql_node($port_master, "INSERT INTO tab_int VALUES (generate_series(11,20))"); +# Here we should have only 10 rows +sleep 1; +command_is(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby }", '-c', "SELECT count(*) FROM tab_int"], + qq(10 +), + 'check content with delay of 1s'); + +# Sleep a bit to have time to replay 
things and wait for delay to work +sleep 1; +command_is(['psql', '-A', '-t', '--no-psqlrc', + "-d $connstr_nodes{ $port_standby }", '-c', "SELECT count(*) FROM tab_int"], + qq(20 +), + 'check content with delay 2s'); + +# Stop nodes +pgctl_node($port_master, 'stop', '-m', 'immediate'); +pgctl_node($port_standby, 'stop', '-m', 'immediate');