From bd540abf3e31a4f5623449ba0669e61722bf7647 Mon Sep 17 00:00:00 2001
From: Craig Ringer
Date: Fri, 19 Aug 2016 14:57:54 +0800
Subject: [PATCH 4/4] Add txid_incinerate() test function for fast wrap-around

Burn txids, really, really, really, really fast. Especially if fsync is
off. Good for reaching wraparound conditions quickly.
---
 src/backend/utils/adt/txid.c             | 170 +++++++++++++++++++++++++++++++
 src/include/catalog/pg_proc.h            |   2 +
 src/test/regress/expected/wraparound.out | 161 +++++++++++++++++++++++++++++
 src/test/regress/parallel_schedule       |   3 +
 src/test/regress/serial_schedule         |   1 +
 src/test/regress/sql/wraparound.sql      | 118 +++++++++++++++++++++
 6 files changed, 455 insertions(+)
 create mode 100644 src/test/regress/expected/wraparound.out
 create mode 100644 src/test/regress/sql/wraparound.sql

diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index ce25093..1137ddb 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -752,3 +752,173 @@ txid_status_internal(PG_FUNCTION_ARGS)
 		ereport(ERROR,
 				(errmsg_internal("unable to determine commit status of xid "UINT64_FORMAT, xid)));
 }
+
+/*
+ * Internal, test-only function to burn transaction IDs as fast as possible.
+ *
+ * Forcibly advances the xid counter to just before wraparound.
+ *
+ * This will cause commit/rollback of our own xact to fail because
+ * the clog page has been truncated away.
+ *
+ * No safety check is performed to ensure nothing else has an xid;
+ * any such transactions will fail on commit. Should really lock procarray.
+ *
+ * There's also no attempt to keep datfrozenxid correct for the other
+ * DBs. The user gets the fun of freezing them.
+ */
+Datum
+txid_incinerate(PG_FUNCTION_ARGS)
+{
+    const char *target;
+    int         nreserved;
+    int         i;
+
+    TransactionId lastAllocatedXid;
+    TransactionId clogPageFirstXid;
+    TransactionId targetNextXid;
+
+    if (!superuser())
+        elog(ERROR, "txid_incinerate may only be called by the superuser");
+
+    if (PG_ARGISNULL(0))
+        elog(ERROR, "xid argument must be non-null");
+
+    if (PG_ARGISNULL(1))
+    {
+        nreserved = 0;
+    }
+    else
+    {
+        nreserved = PG_GETARG_INT32(1);
+        if (nreserved < 0)
+            elog(ERROR, "nreserved xids must be >= 0");
+    }
+
+    target = text_to_cstring(PG_GETARG_TEXT_PP(0));
+
+    LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
+    if (GetTopTransactionIdIfAny() != InvalidTransactionId)
+    {
+        LWLockRelease(XidGenLock);
+        ereport(ERROR,
+                (errmsg_internal("can't burn XIDs in a session with an xid allocated")));
+    }
+
+    lastAllocatedXid = ShmemVariableCache->nextXid;
+    TransactionIdRetreat(lastAllocatedXid);
+
+    if (strcmp(target, "stop") == 0)
+        targetNextXid = ShmemVariableCache->xidStopLimit;
+    else if (strcmp(target, "warn") == 0)
+        targetNextXid = ShmemVariableCache->xidWarnLimit;
+    else if (strcmp(target, "vac") == 0)
+        targetNextXid = ShmemVariableCache->xidVacLimit;
+    else if (strcmp(target, "wrap") == 0)
+        targetNextXid = ShmemVariableCache->xidWrapLimit;
+    else if (strcmp(target, "page") == 0)
+        targetNextXid = ShmemVariableCache->nextXid + CLOG_XACTS_PER_PAGE - nreserved;
+    else
+    {
+        unsigned long parsed;
+        char       *endp;
+
+        parsed = strtoul(target, &endp, 10);
+        if (*endp != '\0')
+            elog(ERROR, "argument must be an xid or one of the strings page, stop, warn, vac or wrap");
+        if (!TransactionIdIsNormal((TransactionId) parsed))
+            elog(ERROR, "argument xid must be a normal xid, not the invalid/frozen/bootstrap xid");
+        targetNextXid = (TransactionId) parsed;
+    }
+
+    for (i = 0; i < nreserved; i++)
+        TransactionIdRetreat(targetNextXid);
+
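+    /*
+     * Sanity check: xid comparisons are circular (modulo 2^31), so the
+     * target must not lie behind the current nextXid or we would
+     * effectively be moving the counter backwards.
+     */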
+    if (!TransactionIdFollowsOrEquals(targetNextXid,
+                                      ShmemVariableCache->nextXid))
+        elog(ERROR, "target xid %u precedes current nextXid %u in modulo-2^31 terms",
+             targetNextXid, ShmemVariableCache->nextXid);
+
+    elog(NOTICE, "xid limits are: vac=%u, warn=%u, stop=%u, wrap=%u, oldest=%u, next=%u; target xid is %u",
+         ShmemVariableCache->xidVacLimit,
+         ShmemVariableCache->xidWarnLimit,
+         ShmemVariableCache->xidStopLimit,
+         ShmemVariableCache->xidWrapLimit,
+         ShmemVariableCache->oldestXid,
+         ShmemVariableCache->nextXid,
+         targetNextXid);
+
+    Assert(TransactionIdPrecedes(ShmemVariableCache->nextXid, ShmemVariableCache->xidStopLimit));
+    Assert(TransactionIdPrecedesOrEquals(ShmemVariableCache->nextXid, ShmemVariableCache->xidWrapLimit));
+
+    /* Advance nextXid to the last xid on the current clog page */
+    clogPageFirstXid = ShmemVariableCache->nextXid - TransactionIdToPgIndex(ShmemVariableCache->nextXid);
+    ShmemVariableCache->nextXid = clogPageFirstXid + (CLOG_XACTS_PER_PAGE - 1);
+    elog(DEBUG1, "txid_incinerate: advanced xid to %u, first %u on page %u",
+         ShmemVariableCache->nextXid, clogPageFirstXid,
+         TransactionIdToPage(ShmemVariableCache->nextXid));
+
+    /*
+     * Write new clog pages and advance to the end of the next page, until
+     * we've allocated the last clog page. This might take a while.
+     *
+     * At each step, force the next xid forward and extend the clog. We must
+     * pass ExtendCLOG the first xid on the new page so that it actually does
+     * some work, since otherwise it just shortcuts out.
+     */
+    do
+    {
+        if (clogPageFirstXid == FirstNormalTransactionId)
+            clogPageFirstXid = CLOG_XACTS_PER_PAGE;
+        else
+            clogPageFirstXid += CLOG_XACTS_PER_PAGE;
+
+        elog(DEBUG1, "txid_incinerate: nextXid %u", ShmemVariableCache->nextXid);
+
+        if (TransactionIdPrecedes(clogPageFirstXid, targetNextXid)
+            && TransactionIdPrecedesOrEquals(targetNextXid, clogPageFirstXid + (CLOG_XACTS_PER_PAGE - 1)))
+        {
+            ShmemVariableCache->nextXid = targetNextXid;
+            elog(DEBUG1, "txid_incinerate: reached target xid %u on this clog page",
+                 targetNextXid);
+        }
+        else
+        {
+            ShmemVariableCache->nextXid = clogPageFirstXid + (CLOG_XACTS_PER_PAGE - 1);
+        }
+
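+        /*
+         * Xids below FirstNormalTransactionId (invalid, bootstrap, frozen)
+         * are reserved; if advancing the page start wrapped past zero onto
+         * them, bump it up to the first normal xid.
+         */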
+        if (clogPageFirstXid < FirstNormalTransactionId)
+        {
+            clogPageFirstXid = FirstNormalTransactionId;
+        }
+
+        Assert(TransactionIdToPgIndex(clogPageFirstXid) == 0 || clogPageFirstXid == FirstNormalTransactionId);
+
+        Assert(TransactionIdPrecedesOrEquals(ShmemVariableCache->nextXid, ShmemVariableCache->xidWrapLimit));
+
+        ExtendCLOG(clogPageFirstXid);
+
+        CHECK_FOR_INTERRUPTS();
+    }
+    while (TransactionIdToPage(ShmemVariableCache->nextXid) != (targetNextXid / CLOG_XACTS_PER_PAGE));
+
+    elog(DEBUG1, "txid_incinerate: done extending clog and advancing counter, nextXid is %u",
+         ShmemVariableCache->nextXid);
+
+    Assert(TransactionIdPrecedesOrEquals(ShmemVariableCache->nextXid, ShmemVariableCache->xidWrapLimit));
+
+    /*
+     * We'd really like to totally reset the clog by truncating it and
+     * moving the wraparound pointer, but we can't do that unless all DBs
+     * are already frozen.
+     *
+     * We can't freeze here since we can't access other DBs. So we've got
+     * to let the user do the job.
+     */
+
+    elog(NOTICE, "txid_incinerate: advanced nextXid to %u",
+         ShmemVariableCache->nextXid);
+
+    LWLockRelease(XidGenLock);
+
+    PG_RETURN_VOID();
+}
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 226bed7..fde95bf 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -4920,6 +4920,8 @@ DATA(insert OID = 3344 ( txid_convert_if_recent PGNSP PGUID 12 1 0 0 0 f f f
 DESCR("get the xid from a bigint transaction id if not wrapped around");
 DATA(insert OID = 3346 ( txid_status_internal PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 23 "20" _null_ _null_ _null_ _null_ _null_ txid_status_internal _null_ _null_ _null_ ));
 DESCR("commit status of transaction");
+DATA(insert OID = 3347 ( txid_incinerate PGNSP PGUID 12 1 0 0 0 f f f f t f v s 2 0 2278 "25 23" _null_ _null_ _null_ _null_ _null_ txid_incinerate _null_ _null_ _null_ ));
+DESCR("burn xids fast");
 
 /* record comparison using normal comparison rules */
 DATA(insert OID = 2981 (  record_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_eq _null_ _null_ _null_ ));
diff --git a/src/test/regress/expected/wraparound.out b/src/test/regress/expected/wraparound.out
new file mode 100644
index 0000000..5638dd6
--- /dev/null
+++ b/src/test/regress/expected/wraparound.out
@@ -0,0 +1,161 @@
+-- We need to be able to force vacuuming of template0
+UPDATE pg_database
+SET datallowconn = true
+WHERE datname = 'template0';
+-- For debugging these tests you'll find something like this useful:
+-- SELECT txid_current() \gset
+-- SELECT BIGINT :'txid_current' AS txid,
+--        BIGINT :'txid_current' >> 32 AS epoch,
+--        BIGINT :'txid_current' & 4294967295 AS xid32;
+SELECT txid_current() AS before_wrap_xid \gset
+SET client_min_messages = 'error';
+SELECT txid_incinerate('stop', 1000);
+ txid_incinerate 
+-----------------
+ 
+(1 row)
+
+-- Should be near UINT32_MAX/2 now
+SELECT txid_current() \gset
+SELECT BIGINT :'txid_current' > (BIGINT '1' << 31) - (BIGINT '1' << 30);
+ ?column? 
+----------
+ t
+(1 row)
+
+SELECT BIGINT :'txid_current' < (BIGINT '1' << 31) + (BIGINT '1' << 30);
+ ?column? 
+----------
+ t
+(1 row)
+
+SELECT BIGINT :'txid_current' >> 32 AS txid_current_epoch;
+ txid_current_epoch 
+--------------------
+                  0
+(1 row)
+
+\c template0
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c template1
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c postgres
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c regression
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+CHECKPOINT;
+select min(datfrozenxid::text::bigint) AS min_datfrozenxid from pg_database \gset
+SELECT BIGINT :'min_datfrozenxid' > (BIGINT '1' << 31) - (BIGINT '1' << 30);
+ ?column? 
+----------
+ t
+(1 row)
+
+SELECT BIGINT :'min_datfrozenxid' < (BIGINT '1' << 31) + (BIGINT '1' << 30);
+ ?column? 
+----------
+ t
+(1 row)
+
+-- That got us to nearly UINT32_MAX/2, another run will get us to near UINT32_MAX
+SET client_min_messages = 'error';
+SELECT txid_incinerate('stop', 1000);
+ txid_incinerate 
+-----------------
+ 
+(1 row)
+
+SELECT txid_current() \gset
+SELECT BIGINT :'txid_current' > (BIGINT '1' << 31) + (BIGINT '1' << 30);
+ ?column? 
+----------
+ t
+(1 row)
+
+SELECT BIGINT :'txid_current' < (BIGINT '1' << 32);
+ ?column? 
+----------
+ t
+(1 row)
+
+SELECT BIGINT :'txid_current' >> 32 AS txid_current_epoch;
+ txid_current_epoch 
+--------------------
+                  0
+(1 row)
+
+\c template0
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c template1
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c postgres
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c regression
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+CHECKPOINT;
+select min(datfrozenxid::text::bigint) AS min_datfrozenxid from pg_database \gset
+SELECT BIGINT :'min_datfrozenxid' > (BIGINT '1' << 31) + (BIGINT '1' << 30);
+ ?column? 
+----------
+ t
+(1 row)
+
+SELECT BIGINT :'min_datfrozenxid' < (BIGINT '1' << 32);
+ ?column? 
+----------
+ t
+(1 row)
+
+-- We should be near UINT32_MAX now, so the next run will
+-- bring us across the epoch boundary.
+SET client_min_messages = 'error';
+SELECT txid_incinerate('stop', 1000);
+ txid_incinerate 
+-----------------
+ 
+(1 row)
+
+SELECT txid_current() \gset
+SELECT BIGINT :'txid_current' > (BIGINT '1' << 32);
+ ?column? 
+----------
+ t
+(1 row)
+
+SELECT BIGINT :'txid_current' >> 32 AS txid_current_epoch;
+ txid_current_epoch 
+--------------------
+                  1
+(1 row)
+
+\c template0
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c template1
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+\c postgres
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+CHECKPOINT;
+\c regression
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+UPDATE pg_database
+SET datallowconn = false
+WHERE datname = 'template0';
+-- Make sure our txid functions handle the epoch wrap
+SELECT txid_convert_if_recent(BIGINT :'before_wrap_xid');
+ txid_convert_if_recent 
+------------------------
+ 
+(1 row)
+
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 1cb5dfc..e5c34d0 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -42,6 +42,9 @@ test: create_type
 test: create_table
 test: create_function_2
 
+# Force txid wraparound
+test: wraparound
+
 # ----------
 # Load huge amounts of data
 # We should split the data files into single files and then
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 8958d8c..1fa4852 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -55,6 +55,7 @@ test: create_function_1
 test: create_type
 test: create_table
 test: create_function_2
+test: wraparound
 test: copy
 test: copyselect
 test: copydml
diff --git a/src/test/regress/sql/wraparound.sql b/src/test/regress/sql/wraparound.sql
new file mode 100644
index 0000000..f67c435
--- /dev/null
+++ b/src/test/regress/sql/wraparound.sql
@@ -0,0 +1,118 @@
+-- We need to be able to force vacuuming of template0
+UPDATE pg_database
+SET datallowconn = true
+WHERE datname = 'template0';
+
+-- For debugging these tests you'll find something like this useful:
+-- SELECT txid_current() \gset
+-- SELECT BIGINT :'txid_current' AS txid,
+--        BIGINT :'txid_current' >> 32 AS epoch,
+--        BIGINT :'txid_current' & 4294967295 AS xid32;
+
+SELECT txid_current() AS before_wrap_xid \gset
+
+
+SET client_min_messages = 'error';
+SELECT txid_incinerate('stop', 1000);
+
+-- Should be near UINT32_MAX/2 now
+SELECT txid_current() \gset
+SELECT BIGINT :'txid_current' > (BIGINT '1' << 31) - (BIGINT '1' << 30);
+SELECT BIGINT :'txid_current' < (BIGINT '1' << 31) + (BIGINT '1' << 30);
+SELECT BIGINT :'txid_current' >> 32 AS txid_current_epoch;
+
+\c template0
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c template1
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c postgres
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c regression
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+CHECKPOINT;
+
+select min(datfrozenxid::text::bigint) AS min_datfrozenxid from pg_database \gset
+SELECT BIGINT :'min_datfrozenxid' > (BIGINT '1' << 31) - (BIGINT '1' << 30);
+SELECT BIGINT :'min_datfrozenxid' < (BIGINT '1' << 31) + (BIGINT '1' << 30);
+
+
+-- That got us to nearly UINT32_MAX/2, another run will get us to near UINT32_MAX
+SET client_min_messages = 'error';
+SELECT txid_incinerate('stop', 1000);
+SELECT txid_current() \gset
+SELECT BIGINT :'txid_current' > (BIGINT '1' << 31) + (BIGINT '1' << 30);
+SELECT BIGINT :'txid_current' < (BIGINT '1' << 32);
+SELECT BIGINT :'txid_current' >> 32 AS txid_current_epoch;
+
+\c template0
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c template1
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c postgres
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c regression
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+CHECKPOINT;
+
+select min(datfrozenxid::text::bigint) AS min_datfrozenxid from pg_database \gset
+SELECT BIGINT :'min_datfrozenxid' > (BIGINT '1' << 31) + (BIGINT '1' << 30);
+SELECT BIGINT :'min_datfrozenxid' < (BIGINT '1' << 32);
+
+
+-- We should be near UINT32_MAX now, so the next run will
+-- bring us across the epoch boundary.
+SET client_min_messages = 'error';
+SELECT txid_incinerate('stop', 1000);
+SELECT txid_current() \gset
+SELECT BIGINT :'txid_current' > (BIGINT '1' << 32);
+SELECT BIGINT :'txid_current' >> 32 AS txid_current_epoch;
+
+\c template0
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c template1
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+\c postgres
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+CHECKPOINT;
+
+\c regression
+SET client_min_messages = 'error';
+VACUUM FREEZE;
+
+
+UPDATE pg_database
+SET datallowconn = false
+WHERE datname = 'template0';
+
+
+-- Make sure our txid functions handle the epoch wrap
+SELECT txid_convert_if_recent(BIGINT :'before_wrap_xid');
-- 
2.5.5