From ed67a9a31fab91a396476001906e9c92f0767f37 Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Thu, 4 Dec 2025 15:35:08 +0100 Subject: [PATCH 1/5] Add hash_make macros This adds a bunch of hash_make* and shmem_hash_make* macros to make it easier and less error prone to create HTABs. These macros are implemented as wrappers around the already existing hash_create function. Using the new macros is preferred, due to the additional compile time checks that they bring. Co-Authored-By: Bertrand Drouvot --- src/backend/utils/hash/dynahash.c | 109 ++++++++++++++++++++ src/include/c.h | 31 ++++++ src/include/storage/shmem.h | 32 ++++++ src/include/utils/hsearch.h | 160 +++++++++++++++++++++++++++++- src/tools/pgindent/typedefs.list | 1 + 5 files changed, 328 insertions(+), 5 deletions(-) diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index ac94b9e93c6e..bc47469ab3c0 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -631,6 +631,115 @@ hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags) return hashp; } +/* + * hash_opts_init -- initialize HASHCTL and flags from HASHOPTS + * + * This processes HASHOPTS fields into HASHCTL and flags. It's used by code + * that needs to call the low-level hash_create function. + */ +void +hash_opts_init(HASHCTL *ctl, int *flags, + Size keysize, Size entrysize, bool string_key, + const HASHOPTS *opts) +{ + MemSet(ctl, 0, sizeof(*ctl)); + ctl->keysize = keysize; + ctl->entrysize = entrysize; + + *flags = HASH_ELEM; + + if (opts != NULL && opts->hash != NULL) + { + /* force_blobs only affects default hash selection, not custom hash */ + Assert(!opts->force_blobs); + ctl->hash = opts->hash; + *flags |= HASH_FUNCTION; + } + else if (opts != NULL && opts->force_blobs) + { + *flags |= HASH_BLOBS; + } + else + { + *flags |= string_key ? 
HASH_STRINGS : HASH_BLOBS; + } + + if (opts != NULL && opts->match != NULL) + { + ctl->match = opts->match; + *flags |= HASH_COMPARE; + } + + if (opts != NULL && opts->keycopy != NULL) + { + ctl->keycopy = opts->keycopy; + *flags |= HASH_KEYCOPY; + } + + if (opts != NULL && opts->alloc != NULL) + { + ctl->alloc = opts->alloc; + *flags |= HASH_ALLOC; + } + + if (opts != NULL && opts->num_partitions > 0) + { + ctl->num_partitions = opts->num_partitions; + *flags |= HASH_PARTITION; + } + + if (opts != NULL && opts->fixed_size) + *flags |= HASH_FIXED_SIZE; +} + +/* + * hash_make_impl -- simplified hash table creation + * + * This is the implementation function for the hash_make() and hash_make_ext() + * macros. It creates a hash table with sensible defaults. + * + * If string_key is true, the key is treated as a null-terminated string + * (uses HASH_STRINGS). Otherwise, the key is treated as a binary blob + * (uses HASH_BLOBS). + * + * Pass NULL for opts to use all defaults. + */ +HTAB * +hash_make_impl(const char *tabname, int64 nelem, + Size keysize, Size entrysize, + bool string_key, + const HASHOPTS *opts, + MemoryContext mcxt) +{ + HASHCTL ctl; + int flags; + + hash_opts_init(&ctl, &flags, keysize, entrysize, string_key, opts); + + ctl.hcxt = mcxt; + flags |= HASH_CONTEXT; + + return hash_create(tabname, nelem, &ctl, flags); +} + +/* + * hash_make_fn_impl -- create a hash table with custom functions + */ +HTAB * +hash_make_fn_impl(const char *tabname, int64 nelem, + Size keysize, Size entrysize, bool string_key, + HashValueFunc hashfn, HashCompareFunc matchfn, + MemoryContext mcxt) +{ + HASHOPTS opts = { + .hash = hashfn, + .match = matchfn + }; + + return hash_make_impl(tabname, nelem, keysize, entrysize, + string_key, &opts, mcxt); +} + /* * Set default HASHHDR parameters. 
*/ diff --git a/src/include/c.h b/src/include/c.h index ccd2b654d459..e3c86d689325 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -81,6 +81,11 @@ #endif #ifdef ENABLE_NLS #include <libintl.h> +#endif +#ifdef __cplusplus +extern "C++" { +#include <type_traits> +} #endif /* Pull in fundamental symbols that we also expose to applications */ @@ -362,6 +367,32 @@ #define HAVE_PG_INTEGER_CONSTANT_P #endif +/* + * pg_expr_has_type_p(expr, type) - Check if an expression has a specific type. + * + * Similar to pg_is_same, but takes an expression instead of a type as the + * first argument. This is useful when you have an expression and want to + * check its type without needing typeof/decltype. + */ +#if defined(__cplusplus) +#define pg_expr_has_type_p(expr, type) (std::is_same<decltype(expr), type>::value) +#else +#define pg_expr_has_type_p(expr, type) \ + _Generic((expr), type: 1, default: 0) +#endif + +/* + * pg_nullptr_of(type) - Create a null pointer of the given type. + * + * In C, (type *)NULL works for all types. In C++, this syntax fails for types + * containing brackets (like char[64]), so we use std::add_pointer_t instead. + */ +#if defined(__cplusplus) +#define pg_nullptr_of(type) (static_cast<std::add_pointer_t<type>>(nullptr)) +#else +#define pg_nullptr_of(type) ((type *)NULL) +#endif + +/* + * pg_assume(expr) states that we assume `expr` to evaluate to true. 
In assert * enabled builds pg_assume() is turned into an assertion, in optimized builds diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h index 70a5b8b172c6..74f61aebc1a0 100644 --- a/src/include/storage/shmem.h +++ b/src/include/storage/shmem.h @@ -43,6 +43,38 @@ extern Size mul_size(Size s1, Size s2); extern PGDLLIMPORT Size pg_get_shmem_pagesize(void); +/* + * Simplified shared memory hash table creation API + * + * These macros provide a simpler way to create shared memory hash tables by: + * - Automatically determining keysize and entrysize from type information + * - Automatically choosing HASH_STRINGS vs HASH_BLOBS based on key type + * - Eliminating the need for explicit HASHCTL and flags in common cases + * + * Usage: + * HTAB *hash = shmem_hash_make(MyEntry, keyfield, "My hash", 64, 128); + * + * For more options (partitioning, fixed size, custom hash): + * HASHOPTS opts = {.num_partitions = 16, .fixed_size = true}; + * HTAB *hash = shmem_hash_make_ext(MyEntry, keyfield, "My hash", 64, 128, &opts); + */ +#define shmem_hash_make(entrytype, keymember, tabname, init_size, max_size) \ + shmem_hash_make_ext(entrytype, keymember, tabname, init_size, max_size, NULL) + +#define shmem_hash_make_ext(entrytype, keymember, tabname, init_size, max_size, opts) \ + (StaticAssertExpr(offsetof(entrytype, keymember) == 0, \ + #keymember " must be first member in " #entrytype), \ + shmem_hash_make_impl( \ + (tabname), (init_size), (max_size), \ + sizeof(((entrytype *)0)->keymember), \ + sizeof(entrytype), \ + HASH_KEY_AS_STRING(entrytype, keymember), \ + (opts))) + +extern HTAB *shmem_hash_make_impl(const char *name, int64 init_size, int64 max_size, + Size keysize, Size entrysize, bool string_key, + const HASHOPTS *opts); + /* ipci.c */ extern void RequestAddinShmemSpace(Size size); diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h index cb09a4cbe8cb..98c88726345c 100644 --- a/src/include/utils/hsearch.h +++ 
b/src/include/utils/hsearch.h @@ -15,6 +15,9 @@ #define HSEARCH_H +/* Hash table control struct is an opaque type known only within dynahash.c */ +typedef struct HTAB HTAB; + /* * Hash functions must have this signature. */ @@ -43,6 +46,150 @@ typedef void *(*HashCopyFunc) (void *dest, const void *src, Size keysize); */ typedef void *(*HashAllocFunc) (Size request); +/* + * Hash options for hash_make_ext and shmem_hash_make_ext macros. + * Contains less commonly needed options that aren't covered by the basic macros. + * All fields default to NULL/0/false when zero-initialized. + */ +typedef struct HASHOPTS +{ + HashValueFunc hash; /* custom hash function (NULL for default) */ + HashCompareFunc match; /* custom comparison function (NULL for + * default) */ + HashCopyFunc keycopy; /* custom key copy function (NULL for default) */ + HashAllocFunc alloc; /* custom allocator (NULL for default) */ + int64 num_partitions; /* partition count (0 for none) */ + bool fixed_size; /* if true, hash table cannot grow */ + bool force_blobs; /* if true, use HASH_BLOBS even for string + * types */ +} HASHOPTS; + +/* + * Helpers to detect if a type should be hashed as a string. + * + * String types include: char arrays and NameData. + * Everything else is treated as a binary blob (HASH_BLOBS). + */ +#define HASH_PTR_AS_STRING(ptr, size) \ + (pg_expr_has_type_p((ptr), char(*)[size]) || pg_expr_has_type_p((ptr), NameData *)) +#define HASH_KEY_AS_STRING(entrytype, keymember) \ + HASH_PTR_AS_STRING(&((entrytype *)0)->keymember, \ + sizeof(((entrytype *)0)->keymember)) +#define HASH_TYPE_AS_STRING(type) \ + HASH_PTR_AS_STRING(pg_nullptr_of(type), sizeof(type)) + +/* + * Create a hash table with minimal boilerplate. + * + * This is the simplest way to create a hash table. 
It: + * - Derives keysize from the keymember's actual type + * - Derives entrysize from the entrytype + * - Automatically chooses HASH_STRINGS or HASH_BLOBS based on key type (char arrays and NameData are treated as strings) + * - Uses CurrentMemoryContext (not TopMemoryContext) + * - Validates that keymember is at offset 0 + * + * Usage: + * typedef struct { Oid oid; char *data; } MyEntry; + * HTAB *h = hash_make(MyEntry, oid, "my table", 64); + */ +#define hash_make(entrytype, keymember, tabname, nelem) \ + hash_make_cxt(entrytype, keymember, tabname, nelem, CurrentMemoryContext) + +/* + * Like hash_make, but allows specifying a memory context. + */ +#define hash_make_cxt(entrytype, keymember, tabname, nelem, mcxt) \ + hash_make_ext_cxt(entrytype, keymember, tabname, nelem, NULL, mcxt) + +/* + * Hash table with custom hash and/or match functions. + * + * Like hash_make, but accepts custom hash and match function pointers. + * Pass NULL for hashfn to use the default hash (based on key type), + * or NULL for matchfn to use the default memcmp-based comparison. + */ +#define hash_make_fn(entrytype, keymember, tabname, nelem, hashfn, matchfn) \ + hash_make_fn_cxt(entrytype, keymember, tabname, nelem, hashfn, matchfn, \ + CurrentMemoryContext) +#define hash_make_fn_cxt(entrytype, keymember, tabname, nelem, hashfn, matchfn, mcxt) \ + (StaticAssertExpr(offsetof(entrytype, keymember) == 0, \ + #keymember " must be first member in " #entrytype), \ + hash_make_fn_impl((tabname), (nelem), \ + sizeof(((entrytype *)0)->keymember), \ + sizeof(entrytype), \ + HASH_KEY_AS_STRING(entrytype, keymember), \ + (hashfn), (matchfn), (mcxt))) + +/* + * Hash table with extended options via HASHOPTS struct. + * + * Like hash_make, but accepts additional options via HASHOPTS struct pointer. + * Pass NULL for opts to use all defaults. 
+ * + * Example usage: + * HASHOPTS opts = {0}; + * opts.hash = my_hash_func; + * opts.num_partitions = 16; + * HTAB *h = hash_make_ext(MyEntry, key, "my table", 64, &opts); + */ +#define hash_make_ext(entrytype, keymember, tabname, nelem, opts) \ + hash_make_ext_cxt(entrytype, keymember, tabname, nelem, opts, CurrentMemoryContext) + +#define hash_make_ext_cxt(entrytype, keymember, tabname, nelem, opts, mcxt) \ + (StaticAssertExpr(offsetof(entrytype, keymember) == 0, \ + #keymember " must be first member in " #entrytype), \ + hash_make_impl( \ + (tabname), (nelem), \ + sizeof(((entrytype *)0)->keymember), \ + sizeof(entrytype), \ + HASH_KEY_AS_STRING(entrytype, keymember), \ + (opts), \ + (mcxt))) + + +/* + * Create a hash set where the entire entry is the key. This is + * like hash_make, but where the key is also the entry. + */ +#define hashset_make(entrytype, tabname, nelem) \ + hashset_make_cxt(entrytype, tabname, nelem, CurrentMemoryContext) +#define hashset_make_cxt(entrytype, tabname, nelem, mcxt) \ + hashset_make_ext_cxt(entrytype, tabname, nelem, NULL, mcxt) +#define hashset_make_fn(entrytype, tabname, nelem, hashfn, matchfn) \ + hashset_make_fn_cxt(entrytype, tabname, nelem, hashfn, matchfn, \ + CurrentMemoryContext) +#define hashset_make_fn_cxt(entrytype, tabname, nelem, hashfn, matchfn, mcxt) \ + hash_make_fn_impl((tabname), (nelem), sizeof(entrytype), sizeof(entrytype), \ + HASH_TYPE_AS_STRING(entrytype), (hashfn), (matchfn), \ + mcxt) +#define hashset_make_ext(entrytype, tabname, nelem, opts) \ + hashset_make_ext_cxt(entrytype, tabname, nelem, opts, \ + CurrentMemoryContext) +#define hashset_make_ext_cxt(entrytype, tabname, nelem, opts, mcxt) \ + hash_make_impl((tabname), (nelem), sizeof(entrytype), sizeof(entrytype), \ + HASH_TYPE_AS_STRING(entrytype), opts, (mcxt)) + +/* + * Implementation function for hash_make macros. Not meant to be called + * directly. + * + * If string_key is true, the key is treated as a null-terminated string. 
+ * Pass NULL for opts to use all defaults. + */ +extern HTAB *hash_make_impl(const char *tabname, int64 nelem, + Size keysize, Size entrysize, + bool string_key, + const HASHOPTS *opts, + MemoryContext mcxt); + +/* + * Implementation function for hash_make_fn macros. + */ +extern HTAB *hash_make_fn_impl(const char *tabname, int64 nelem, + Size keysize, Size entrysize, bool string_key, + HashValueFunc hashfn, HashCompareFunc matchfn, + MemoryContext mcxt); + /* * HASHELEMENT is the private part of a hashtable entry. The caller's data * follows the HASHELEMENT structure (on a MAXALIGN'd boundary). The hash key @@ -57,11 +204,11 @@ typedef struct HASHELEMENT /* Hash table header struct is an opaque type known only within dynahash.c */ typedef struct HASHHDR HASHHDR; -/* Hash table control struct is an opaque type known only within dynahash.c */ -typedef struct HTAB HTAB; - -/* Parameter data structure for hash_create */ -/* Only those fields indicated by hash_flags need be set */ +/* + * Parameter data structure for hash_create (which is the low-level method of + * initializing hash tables, hash_make macros are preferred) + * Only those fields indicated by hash_flags need be set + */ typedef struct HASHCTL { /* Used if HASH_PARTITION flag is set: */ @@ -131,6 +278,9 @@ typedef struct */ extern HTAB *hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags); +extern void hash_opts_init(HASHCTL *ctl, int *flags, + Size keysize, Size entrysize, bool string_key, + const HASHOPTS *opts); extern void hash_destroy(HTAB *hashp); extern void hash_stats(const char *caller, HTAB *hashp); extern void *hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index c1ad80a418d0..265fac913b60 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -1145,6 +1145,7 @@ HASHBUCKET HASHCTL HASHELEMENT HASHHDR +HASHOPTS HASHSEGMENT HASH_SEQ_STATUS HE 
From 322cd978b4d4b1d8e575518bc661879d0366c83e Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Thu, 4 Dec 2025 15:36:19 +0100 Subject: [PATCH 2/5] Use hash_make macros throughout the codebase This shows how our code base looks when using the new APIs. This has some type-safety, readability and maintainability benefits, but it also introduces some backpatching problems. These backpatching problems cannot be resolved by backporting the new hash_make macros, because some of them require C11 (which we only require on master for now). I think it's unlikely that we'll need to backpatch things in code that creates hashtables though, so it could still be worth it to do this complete refactor. At the very least we should choose a few places where we use the new macros to make sure they have coverage. --- contrib/dblink/dblink.c | 10 +-- .../pg_stat_statements/pg_stat_statements.c | 10 +-- contrib/pg_trgm/trgm_regexp.c | 9 +-- contrib/postgres_fdw/connection.c | 10 +-- contrib/postgres_fdw/shippable.c | 10 ++- contrib/tablefunc/tablefunc.c | 17 ++--- src/backend/access/common/heaptuple.c | 13 +--- src/backend/access/gist/gistbuild.c | 11 +--- src/backend/access/gist/gistbuildbuffers.c | 10 +-- src/backend/access/hash/hashpage.c | 13 +--- src/backend/access/heap/rewriteheap.c | 39 ++++------- src/backend/access/transam/xlogprefetcher.c | 8 +-- src/backend/access/transam/xlogutils.c | 13 ++-- src/backend/catalog/pg_enum.c | 24 ++----- src/backend/catalog/pg_inherits.c | 11 +--- src/backend/catalog/storage.c | 21 ++---- src/backend/commands/async.c | 14 ++-- src/backend/commands/prepare.c | 12 +--- src/backend/commands/sequence.c | 10 +-- src/backend/commands/tablecmds.c | 16 +---- src/backend/executor/nodeModifyTable.c | 10 +-- src/backend/nodes/extensible.c | 10 +-- src/backend/optimizer/util/plancat.c | 17 ++--- src/backend/optimizer/util/predtest.c | 9 +-- src/backend/optimizer/util/relnode.c | 13 +--- src/backend/parser/parse_oper.c | 10 ++- 
src/backend/partitioning/partdesc.c | 9 +-- src/backend/postmaster/autovacuum.c | 22 ++----- src/backend/postmaster/checkpointer.c | 12 +--- .../replication/logical/applyparallelworker.c | 13 +--- src/backend/replication/logical/relation.c | 22 ++----- .../replication/logical/reorderbuffer.c | 29 ++------ src/backend/replication/logical/tablesync.c | 10 ++- src/backend/replication/pgoutput/pgoutput.c | 11 +--- src/backend/storage/buffer/buf_table.c | 15 ++--- src/backend/storage/buffer/bufmgr.c | 11 ++-- src/backend/storage/buffer/localbuf.c | 11 +--- src/backend/storage/file/reinit.c | 8 +-- src/backend/storage/ipc/shmem.c | 33 +++++++--- src/backend/storage/ipc/standby.c | 20 ++---- src/backend/storage/lmgr/lock.c | 55 +++++----------- src/backend/storage/lmgr/lwlock.c | 8 +-- src/backend/storage/lmgr/predicate.c | 66 ++++++++----------- src/backend/storage/smgr/smgr.c | 10 ++- src/backend/storage/sync/sync.c | 11 +--- src/backend/tsearch/ts_typanalyze.c | 13 +--- src/backend/utils/activity/wait_event.c | 23 +++---- src/backend/utils/adt/array_typanalyze.c | 23 ++----- src/backend/utils/adt/json.c | 15 +---- src/backend/utils/adt/jsonfuncs.c | 20 ++---- src/backend/utils/adt/mcxtfuncs.c | 11 +--- src/backend/utils/adt/ri_triggers.c | 29 ++++---- src/backend/utils/adt/ruleutils.c | 23 ++----- src/backend/utils/cache/attoptcache.c | 16 ++--- src/backend/utils/cache/evtcache.c | 9 +-- src/backend/utils/cache/funccache.c | 14 ++-- src/backend/utils/cache/relcache.c | 19 ++---- src/backend/utils/cache/relfilenumbermap.c | 9 +-- src/backend/utils/cache/spccache.c | 9 +-- src/backend/utils/cache/ts_cache.c | 27 +++----- src/backend/utils/cache/typcache.c | 33 ++++------ src/backend/utils/fmgr/dfmgr.c | 12 ++-- src/backend/utils/fmgr/fmgr.c | 11 +--- src/backend/utils/misc/guc.c | 14 ++-- src/backend/utils/misc/injection_point.c | 14 ++-- src/backend/utils/mmgr/portalmem.c | 10 +-- src/backend/utils/time/combocid.c | 13 +--- src/pl/plperl/plperl.c | 32 +++------ 
src/pl/plpgsql/src/pl_exec.c | 32 ++++----- src/pl/plpython/plpy_plpymodule.c | 9 ++- src/pl/plpython/plpy_procedure.c | 9 +-- src/pl/tcl/pltcl.c | 19 ++---- src/timezone/pgtz.c | 12 +--- 73 files changed, 371 insertions(+), 835 deletions(-) diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 8bf8fc8ea2f3..2951c39d69fb 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -2540,13 +2540,9 @@ getConnectionByName(const char *name) static HTAB * createConnHash(void) { - HASHCTL ctl; - - ctl.keysize = NAMEDATALEN; - ctl.entrysize = sizeof(remoteConnHashEnt); - - return hash_create("Remote Con hash", NUMCONN, &ctl, - HASH_ELEM | HASH_STRINGS); + return hash_make_cxt(remoteConnHashEnt, name, + "Remote Con hash", NUMCONN, + TopMemoryContext); } static remoteConn * diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 39208f80b5bb..2c55b78e9ba0 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -517,7 +517,6 @@ static void pgss_shmem_startup(void) { bool found; - HASHCTL info; FILE *file = NULL; FILE *qfile = NULL; uint32 header; @@ -557,12 +556,9 @@ pgss_shmem_startup(void) pgss->stats.stats_reset = GetCurrentTimestamp(); } - info.keysize = sizeof(pgssHashKey); - info.entrysize = sizeof(pgssEntry); - pgss_hash = ShmemInitHash("pg_stat_statements hash", - pgss_max, pgss_max, - &info, - HASH_ELEM | HASH_BLOBS); + pgss_hash = shmem_hash_make(pgssEntry, key, + "pg_stat_statements hash", + pgss_max, pgss_max); LWLockRelease(AddinShmemInitLock); diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c index 1a76794c4229..df7d3b70d04d 100644 --- a/contrib/pg_trgm/trgm_regexp.c +++ b/contrib/pg_trgm/trgm_regexp.c @@ -893,7 +893,6 @@ convertPgWchar(pg_wchar c, trgm_mb_char *result) static void transformGraph(TrgmNFA *trgmNFA) { - HASHCTL hashCtl; TrgmStateKey initkey; TrgmState *initstate; ListCell *lc; 
@@ -905,13 +904,7 @@ transformGraph(TrgmNFA *trgmNFA) trgmNFA->overflowed = false; /* Create hashtable for states */ - hashCtl.keysize = sizeof(TrgmStateKey); - hashCtl.entrysize = sizeof(TrgmState); - hashCtl.hcxt = CurrentMemoryContext; - trgmNFA->states = hash_create("Trigram NFA", - 1024, - &hashCtl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + trgmNFA->states = hash_make(TrgmState, stateKey, "Trigram NFA", 1024); trgmNFA->nstates = 0; /* Create initial state: ambiguous prefix, NFA's initial state */ diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 953c2e0ab828..042a7fb3ac1a 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -214,17 +214,13 @@ GetConnection(UserMapping *user, bool will_prep_stmt, PgFdwConnState **state) /* First time through, initialize connection cache hashtable */ if (ConnectionHash == NULL) { - HASHCTL ctl; - if (pgfdw_we_get_result == 0) pgfdw_we_get_result = WaitEventExtensionNew("PostgresFdwGetResult"); - ctl.keysize = sizeof(ConnCacheKey); - ctl.entrysize = sizeof(ConnCacheEntry); - ConnectionHash = hash_create("postgres_fdw connections", 8, - &ctl, - HASH_ELEM | HASH_BLOBS); + ConnectionHash = hash_make_cxt(ConnCacheEntry, key, + "postgres_fdw connections", 8, + TopMemoryContext); /* * Register some callback functions that manage connection cleanup. diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c index da3b13b207d1..66d2d0b9ff4f 100644 --- a/contrib/postgres_fdw/shippable.c +++ b/contrib/postgres_fdw/shippable.c @@ -28,6 +28,7 @@ #include "postgres_fdw.h" #include "utils/hsearch.h" #include "utils/inval.h" +#include "utils/memutils.h" #include "utils/syscache.h" /* Hash table for caching the results of shippability lookups */ @@ -90,13 +91,10 @@ InvalidateShippableCacheCallback(Datum arg, int cacheid, uint32 hashvalue) static void InitializeShippableCache(void) { - HASHCTL ctl; - /* Create the hash table. 
*/ - ctl.keysize = sizeof(ShippableCacheKey); - ctl.entrysize = sizeof(ShippableCacheEntry); - ShippableCacheHash = - hash_create("Shippability cache", 256, &ctl, HASH_ELEM | HASH_BLOBS); + ShippableCacheHash = hash_make_cxt(ShippableCacheEntry, key, + "Shippability cache", 256, + TopMemoryContext); /* Set up invalidation callback on pg_foreign_server. */ CacheRegisterSyscacheCallback(FOREIGNSERVEROID, diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index c40fd36dc966..519855189cf4 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -705,24 +705,17 @@ static HTAB * load_categories_hash(char *cats_sql, MemoryContext per_query_ctx) { HTAB *crosstab_hash; - HASHCTL ctl; int ret; uint64 proc; MemoryContext SPIcontext; - /* initialize the category hash table */ - ctl.keysize = MAX_CATNAME_LEN; - ctl.entrysize = sizeof(crosstab_HashEnt); - ctl.hcxt = per_query_ctx; - /* - * use INIT_CATS, defined above as a guess of how many hash table entries - * to create, initially + * Initialize the category hash table. Use INIT_CATS, defined above as a + * guess of how many hash table entries to create, initially. 
*/ - crosstab_hash = hash_create("crosstab hash", - INIT_CATS, - &ctl, - HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); + crosstab_hash = hash_make_cxt(crosstab_HashEnt, internal_catname, + "crosstab hash", INIT_CATS, + per_query_ctx); /* Connect to SPI manager */ SPI_connect(); diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 1967b047020a..98793a3f2eba 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -125,18 +125,9 @@ missing_match(const void *key1, const void *key2, Size keysize) static void init_missing_cache(void) { - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(missing_cache_key); - hash_ctl.entrysize = sizeof(missing_cache_key); - hash_ctl.hcxt = TopMemoryContext; - hash_ctl.hash = missing_hash; - hash_ctl.match = missing_match; missing_cache = - hash_create("Missing Values Cache", - 32, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE); + hashset_make_fn_cxt(missing_cache_key, "Missing Values Cache", 32, + missing_hash, missing_match, TopMemoryContext); } /* ---------------------------------------------------------------- diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index be0fd5b753d7..629558d905bc 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -1515,15 +1515,8 @@ typedef struct static void gistInitParentMap(GISTBuildState *buildstate) { - HASHCTL hashCtl; - - hashCtl.keysize = sizeof(BlockNumber); - hashCtl.entrysize = sizeof(ParentMapEntry); - hashCtl.hcxt = CurrentMemoryContext; - buildstate->parentMap = hash_create("gistbuild parent map", - 1024, - &hashCtl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + buildstate->parentMap = hash_make(ParentMapEntry, childblkno, + "gistbuild parent map", 1024); } static void diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 0707254d18ea..fc36c90d333e 100644 --- 
a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -44,7 +44,6 @@ GISTBuildBuffers * gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel) { GISTBuildBuffers *gfbb; - HASHCTL hashCtl; gfbb = palloc(sizeof(GISTBuildBuffers)); gfbb->pagesPerBuffer = pagesPerBuffer; @@ -72,13 +71,8 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel) * nodeBuffersTab hash is association between index blocks and it's * buffers. */ - hashCtl.keysize = sizeof(BlockNumber); - hashCtl.entrysize = sizeof(GISTNodeBuffer); - hashCtl.hcxt = CurrentMemoryContext; - gfbb->nodeBuffersTab = hash_create("gistbuildbuffers", - 1024, - &hashCtl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + gfbb->nodeBuffersTab = hash_make(GISTNodeBuffer, nodeBlocknum, + "gistbuildbuffers", 1024); gfbb->bufferEmptyingQueue = NIL; diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index b8e5bd005e59..cf71fa9cb4f4 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -1356,7 +1356,6 @@ void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, uint32 maxbucket, uint32 highmask, uint32 lowmask) { - HASHCTL hash_ctl; HTAB *tidhtab; Buffer bucket_nbuf = InvalidBuffer; Buffer nbuf; @@ -1367,16 +1366,8 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, Bucket nbucket; bool found; - /* Initialize hash tables used to track TIDs */ - hash_ctl.keysize = sizeof(ItemPointerData); - hash_ctl.entrysize = sizeof(ItemPointerData); - hash_ctl.hcxt = CurrentMemoryContext; - - tidhtab = - hash_create("bucket ctids", - 256, /* arbitrary initial size */ - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + /* Initialize hash tables used to track TIDs (with arbitrary initial size) */ + tidhtab = hashset_make(ItemPointerData, "bucket ctids", 256); bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket); diff --git 
a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index 66ab48f0fe0d..dc2c46742fdc 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -237,7 +237,6 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm RewriteState state; MemoryContext rw_cxt; MemoryContext old_cxt; - HASHCTL hash_ctl; /* * To ease cleanup, make a separate context that will contain the @@ -262,24 +261,19 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm state->rs_cxt = rw_cxt; state->rs_bulkstate = smgr_bulk_start_rel(new_heap, MAIN_FORKNUM); - /* Initialize hash tables used to track update chains */ - hash_ctl.keysize = sizeof(TidHashKey); - hash_ctl.entrysize = sizeof(UnresolvedTupData); - hash_ctl.hcxt = state->rs_cxt; - + /* + * Initialize hash tables used to track update chains (with arbitrary + * initial sizes) + */ state->rs_unresolved_tups = - hash_create("Rewrite / Unresolved ctids", - 128, /* arbitrary initial size */ - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - - hash_ctl.entrysize = sizeof(OldToNewMappingData); + hash_make_cxt(UnresolvedTupData, key, + "Rewrite / Unresolved ctids", 128, + state->rs_cxt); state->rs_old_new_tid_map = - hash_create("Rewrite / Old to new tid map", - 128, /* arbitrary initial size */ - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + hash_make_cxt(OldToNewMappingData, key, + "Rewrite / Old to new tid map", 128, + state->rs_cxt); MemoryContextSwitchTo(old_cxt); @@ -757,7 +751,6 @@ raw_heap_insert(RewriteState state, HeapTuple tup) static void logical_begin_heap_rewrite(RewriteState state) { - HASHCTL hash_ctl; TransactionId logical_xmin; /* @@ -788,15 +781,11 @@ logical_begin_heap_rewrite(RewriteState state) state->rs_begin_lsn = GetXLogInsertRecPtr(); state->rs_num_rewrite_mappings = 0; - hash_ctl.keysize = sizeof(TransactionId); - hash_ctl.entrysize = sizeof(RewriteMappingFile); - hash_ctl.hcxt = 
state->rs_cxt; - state->rs_logical_mappings = - hash_create("Logical rewrite mapping", - 128, /* arbitrary initial size */ - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + hash_make_cxt(RewriteMappingFile, xid, + "Logical rewrite mapping", + 128, /* arbitrary initial size */ + state->rs_cxt); } /* diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c index ed3aacabc986..f790eea87ff8 100644 --- a/src/backend/access/transam/xlogprefetcher.c +++ b/src/backend/access/transam/xlogprefetcher.c @@ -362,15 +362,13 @@ XLogPrefetcher * XLogPrefetcherAllocate(XLogReaderState *reader) { XLogPrefetcher *prefetcher; - HASHCTL ctl; prefetcher = palloc0(sizeof(XLogPrefetcher)); prefetcher->reader = reader; - ctl.keysize = sizeof(RelFileLocator); - ctl.entrysize = sizeof(XLogPrefetcherFilter); - prefetcher->filter_table = hash_create("XLogPrefetcherFilterTable", 1024, - &ctl, HASH_ELEM | HASH_BLOBS); + prefetcher->filter_table = hash_make_cxt(XLogPrefetcherFilter, rlocator, + "XLogPrefetcherFilterTable", 1024, + TopMemoryContext); dlist_init(&prefetcher->filter_queue); SharedStats->wal_distance = 0; diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index ce2a3e421462..9e24759f5cc6 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -27,6 +27,7 @@ #include "storage/fd.h" #include "storage/smgr.h" #include "utils/hsearch.h" +#include "utils/memutils.h" #include "utils/rel.h" @@ -131,15 +132,9 @@ log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno, if (invalid_page_tab == NULL) { /* create hash table when first needed */ - HASHCTL ctl; - - ctl.keysize = sizeof(xl_invalid_page_key); - ctl.entrysize = sizeof(xl_invalid_page); - - invalid_page_tab = hash_create("XLOG invalid-page table", - 100, - &ctl, - HASH_ELEM | HASH_BLOBS); + invalid_page_tab = hash_make_cxt(xl_invalid_page, key, + "XLOG invalid-page table", 
100, + TopMemoryContext); } /* we currently assume xl_invalid_page_key contains no padding */ diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c index da9c2a46cfaa..4787a61c7d38 100644 --- a/src/backend/catalog/pg_enum.c +++ b/src/backend/catalog/pg_enum.c @@ -267,15 +267,9 @@ EnumValuesDelete(Oid enumTypeOid) static void init_uncommitted_enum_types(void) { - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(Oid); - hash_ctl.hcxt = TopTransactionContext; - uncommitted_enum_types = hash_create("Uncommitted enum types", - 32, - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + uncommitted_enum_types = hashset_make_cxt(Oid, + "Uncommitted enum types", 32, + TopTransactionContext); } /* @@ -284,15 +278,9 @@ init_uncommitted_enum_types(void) static void init_uncommitted_enum_values(void) { - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(Oid); - hash_ctl.hcxt = TopTransactionContext; - uncommitted_enum_values = hash_create("Uncommitted enum values", - 32, - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + uncommitted_enum_values = hashset_make_cxt(Oid, + "Uncommitted enum values", 32, + TopTransactionContext); } /* diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c index 929bb53b620f..c78aa727f0bc 100644 --- a/src/backend/catalog/pg_inherits.c +++ b/src/backend/catalog/pg_inherits.c @@ -256,19 +256,12 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents) { /* hash table for O(1) rel_oid -> rel_numparents cell lookup */ HTAB *seen_rels; - HASHCTL ctl; List *rels_list, *rel_numparents; ListCell *l; - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(SeenRelsEntry); - ctl.hcxt = CurrentMemoryContext; - - seen_rels = hash_create("find_all_inheritors temporary table", - 32, /* start small and extend */ - &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + seen_rels = hash_make(SeenRelsEntry, rel_id, + 
"find_all_inheritors temporary table", 32); /* * We build a list starting with the given rel and adding all direct and diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index c58e9418ac31..1d13b4a1f390 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -90,15 +90,9 @@ AddPendingSync(const RelFileLocator *rlocator) /* create the hash if not yet */ if (!pendingSyncHash) - { - HASHCTL ctl; - - ctl.keysize = sizeof(RelFileLocator); - ctl.entrysize = sizeof(PendingRelSync); - ctl.hcxt = TopTransactionContext; - pendingSyncHash = hash_create("pending sync hash", 16, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - } + pendingSyncHash = hash_make_cxt(PendingRelSync, rlocator, + "pending sync hash", 16, + TopTransactionContext); pending = hash_search(pendingSyncHash, rlocator, HASH_ENTER, &found); Assert(!found); @@ -600,7 +594,6 @@ void SerializePendingSyncs(Size maxSize, char *startAddress) { HTAB *tmphash; - HASHCTL ctl; HASH_SEQ_STATUS scan; PendingRelSync *sync; PendingRelDelete *delete; @@ -611,12 +604,8 @@ SerializePendingSyncs(Size maxSize, char *startAddress) goto terminate; /* Create temporary hash to collect active relfilelocators */ - ctl.keysize = sizeof(RelFileLocator); - ctl.entrysize = sizeof(RelFileLocator); - ctl.hcxt = CurrentMemoryContext; - tmphash = hash_create("tmp relfilelocators", - hash_get_num_entries(pendingSyncHash), &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + tmphash = hashset_make(RelFileLocator, "tmp relfilelocators", + hash_get_num_entries(pendingSyncHash)); /* collect all rlocator from pending syncs */ hash_seq_init(&scan, pendingSyncHash); diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index eb86402cae43..9a7c8d1fb95d 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -2414,20 +2414,14 @@ AddEventToPendingNotifies(Notification *n) if (list_length(pendingNotifies->events) >= MIN_HASHABLE_NOTIFIES && pendingNotifies->hashtab 
== NULL) { - HASHCTL hash_ctl; ListCell *l; /* Create the hash table */ - hash_ctl.keysize = sizeof(Notification *); - hash_ctl.entrysize = sizeof(struct NotificationHash); - hash_ctl.hash = notification_hash; - hash_ctl.match = notification_match; - hash_ctl.hcxt = CurTransactionContext; pendingNotifies->hashtab = - hash_create("Pending Notifies", - 256L, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + hash_make_fn_cxt(struct NotificationHash, event, + "Pending Notifies", 256, + notification_hash, notification_match, + CurTransactionContext); /* Insert all the already-existing events */ foreach(l, pendingNotifies->events) diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 34b6410d6a26..0002c6dc993e 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -371,15 +371,9 @@ EvaluateParams(ParseState *pstate, PreparedStatement *pstmt, List *params, static void InitQueryHashTable(void) { - HASHCTL hash_ctl; - - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = sizeof(PreparedStatement); - - prepared_queries = hash_create("Prepared Queries", - 32, - &hash_ctl, - HASH_ELEM | HASH_STRINGS); + prepared_queries = hash_make_cxt(PreparedStatement, stmt_name, + "Prepared Queries", 32, + TopMemoryContext); } /* diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 51567994126f..5fbb3ce90300 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -1113,13 +1113,9 @@ lock_and_open_sequence(SeqTable seq) static void create_seq_hashtable(void) { - HASHCTL ctl; - - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(SeqTableData); - - seqhashtab = hash_create("Sequence values", 16, &ctl, - HASH_ELEM | HASH_BLOBS); + seqhashtab = hash_make_cxt(SeqTableData, relid, + "Sequence values", 16, + TopMemoryContext); } /* diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 07e5b95782e4..7f0fb263772f 100644 
--- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -2154,19 +2154,9 @@ ExecuteTruncateGuts(List *explicit_rels, /* First time through, initialize hashtable for foreign tables */ if (!ft_htab) - { - HASHCTL hctl; - - memset(&hctl, 0, sizeof(HASHCTL)); - hctl.keysize = sizeof(Oid); - hctl.entrysize = sizeof(ForeignTruncateInfo); - hctl.hcxt = CurrentMemoryContext; - - ft_htab = hash_create("TRUNCATE for Foreign Tables", - 32, /* start small and extend */ - &hctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - } + ft_htab = hash_make(ForeignTruncateInfo, serverid, + "TRUNCATE for Foreign Tables", + 32); /* start small and extend */ /* Find or create cached entry for the foreign table */ ft_info = hash_search(ft_htab, &serverid, HASH_ENTER, &found); diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index e44f12238864..537901ad5a00 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -5134,15 +5134,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) #endif if (nrels >= MT_NRELS_HASH) { - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(MTTargetRelLookup); - hash_ctl.hcxt = CurrentMemoryContext; mtstate->mt_resultOidHash = - hash_create("ModifyTable target hash", - nrels, &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + hash_make(MTTargetRelLookup, relationOid, + "ModifyTable target hash", nrels); for (i = 0; i < nrels; i++) { Oid hashkey; diff --git a/src/backend/nodes/extensible.c b/src/backend/nodes/extensible.c index 3ede1ee0f5d6..1c0e2b20cd10 100644 --- a/src/backend/nodes/extensible.c +++ b/src/backend/nodes/extensible.c @@ -22,6 +22,7 @@ #include "nodes/extensible.h" #include "utils/hsearch.h" +#include "utils/memutils.h" static HTAB *extensible_node_methods = NULL; static HTAB *custom_scan_methods = NULL; @@ -45,13 +46,8 @@ RegisterExtensibleNodeEntry(HTAB **p_htable, const char 
*htable_label, if (*p_htable == NULL) { - HASHCTL ctl; - - ctl.keysize = EXTNODENAME_MAX_LEN; - ctl.entrysize = sizeof(ExtensibleNodeEntry); - - *p_htable = hash_create(htable_label, 100, &ctl, - HASH_ELEM | HASH_STRINGS); + *p_htable = hash_make_cxt(ExtensibleNodeEntry, extnodename, + htable_label, 100, TopMemoryContext); } if (strlen(extnodename) >= EXTNODENAME_MAX_LEN) diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 07f92fac239a..32435933a934 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -694,19 +694,10 @@ get_relation_notnullatts(PlannerInfo *root, Relation relation) /* create the hash table if it hasn't been created yet */ if (root->glob->rel_notnullatts_hash == NULL) { - HTAB *hashtab; - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(NotnullHashEntry); - hash_ctl.hcxt = CurrentMemoryContext; - - hashtab = hash_create("Relation NOT NULL attnums", - 64L, /* arbitrary initial size */ - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - - root->glob->rel_notnullatts_hash = hashtab; + root->glob->rel_notnullatts_hash = + hash_make(NotnullHashEntry, relid, + "Relation NOT NULL attnums", + 64L); /* arbitrary initial size */ } /* diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c index ac28573cd0a5..43291f128fd0 100644 --- a/src/backend/optimizer/util/predtest.c +++ b/src/backend/optimizer/util/predtest.c @@ -2117,12 +2117,9 @@ lookup_proof_cache(Oid pred_op, Oid clause_op, bool refute_it) if (OprProofCacheHash == NULL) { /* First time through: initialize the hash table */ - HASHCTL ctl; - - ctl.keysize = sizeof(OprProofCacheKey); - ctl.entrysize = sizeof(OprProofCacheEntry); - OprProofCacheHash = hash_create("Btree proof lookup cache", 256, - &ctl, HASH_ELEM | HASH_BLOBS); + OprProofCacheHash = hash_make_cxt(OprProofCacheEntry, key, + "Btree proof lookup cache", 256, + TopMemoryContext); /* 
Arrange to flush cache on pg_amop changes */ CacheRegisterSyscacheCallback(AMOPOPID, diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 1158bc194c31..3ed61fefb0a4 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -601,19 +601,12 @@ static void build_join_rel_hash(PlannerInfo *root) { HTAB *hashtab; - HASHCTL hash_ctl; ListCell *l; /* Create the hash table */ - hash_ctl.keysize = sizeof(Relids); - hash_ctl.entrysize = sizeof(JoinHashEntry); - hash_ctl.hash = bitmap_hash; - hash_ctl.match = bitmap_match; - hash_ctl.hcxt = CurrentMemoryContext; - hashtab = hash_create("JoinRelHashTable", - 256L, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + hashtab = hash_make_fn(JoinHashEntry, join_relids, + "JoinRelHashTable", 256, + bitmap_hash, bitmap_match); /* Insert all the already-existing joinrels */ foreach(l, root->join_rel_list) diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c index 7bd7a336fd6f..f9588865cfce 100644 --- a/src/backend/parser/parse_oper.c +++ b/src/backend/parser/parse_oper.c @@ -27,6 +27,7 @@ #include "utils/builtins.h" #include "utils/inval.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/syscache.h" #include "utils/typcache.h" @@ -1028,12 +1029,9 @@ find_oper_cache_entry(OprCacheKey *key) if (OprCacheHash == NULL) { /* First time through: initialize the hash table */ - HASHCTL ctl; - - ctl.keysize = sizeof(OprCacheKey); - ctl.entrysize = sizeof(OprCacheEntry); - OprCacheHash = hash_create("Operator lookup cache", 256, - &ctl, HASH_ELEM | HASH_BLOBS); + OprCacheHash = hash_make_cxt(OprCacheEntry, key, + "Operator lookup cache", 256, + TopMemoryContext); /* Arrange to flush cache on pg_operator and pg_cast changes */ CacheRegisterSyscacheCallback(OPERNAMENSP, diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c index 328b4d450e45..b3fbd781494b 100644 
--- a/src/backend/partitioning/partdesc.c +++ b/src/backend/partitioning/partdesc.c @@ -424,17 +424,12 @@ CreatePartitionDirectory(MemoryContext mcxt, bool omit_detached) { MemoryContext oldcontext = MemoryContextSwitchTo(mcxt); PartitionDirectory pdir; - HASHCTL ctl; pdir = palloc(sizeof(PartitionDirectoryData)); pdir->pdir_mcxt = mcxt; - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartitionDirectoryEntry); - ctl.hcxt = mcxt; - - pdir->pdir_hash = hash_create("partition directory", 256, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + pdir->pdir_hash = hash_make_cxt(PartitionDirectoryEntry, reloid, + "partition directory", 256, mcxt); pdir->omit_detached = omit_detached; MemoryContextSwitchTo(oldcontext); diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 1c38488f2cbb..f1b6b1a24a9e 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -907,7 +907,6 @@ rebuild_database_list(Oid newdb) MemoryContext newcxt; MemoryContext oldcxt; MemoryContext tmpcxt; - HASHCTL hctl; int score; int nelems; HTAB *dbhash; @@ -937,12 +936,10 @@ rebuild_database_list(Oid newdb) * score, and finally put the array elements into the new doubly linked * list. 
*/ - hctl.keysize = sizeof(Oid); - hctl.entrysize = sizeof(avl_dbase); - hctl.hcxt = tmpcxt; - dbhash = hash_create("autovacuum db hash", 20, &hctl, /* magic number here - * FIXME */ - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + dbhash = hash_make_cxt(avl_dbase, adl_datid, + "autovacuum db hash", + 20, /* magic number here FIXME */ + tmpcxt); /* start by inserting the new database */ score = 0; @@ -1904,7 +1901,6 @@ do_autovacuum(void) Form_pg_database dbForm; List *table_oids = NIL; List *orphan_oids = NIL; - HASHCTL ctl; HTAB *table_toast_map; ListCell *volatile cell; BufferAccessStrategy bstrategy; @@ -1977,13 +1973,9 @@ do_autovacuum(void) pg_class_desc = CreateTupleDescCopy(RelationGetDescr(classRel)); /* create hash table for toast <-> main relid mapping */ - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(av_relation); - - table_toast_map = hash_create("TOAST to main relid map", - 100, - &ctl, - HASH_ELEM | HASH_BLOBS); + table_toast_map = hash_make_cxt(av_relation, ar_toastrelid, + "TOAST to main relid map", 100, + TopMemoryContext); /* * Scan pg_class to determine which tables to vacuum. 
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index e84e8663e966..83bae2b75b6b 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -1301,7 +1301,6 @@ CompactCheckpointerRequestQueue(void) int num_requests; int read_idx, write_idx; - HASHCTL ctl; HTAB *htab; bool *skip_slot; @@ -1321,14 +1320,9 @@ CompactCheckpointerRequestQueue(void) head = CheckpointerShmem->head; /* Initialize temporary hash table */ - ctl.keysize = sizeof(CheckpointerRequest); - ctl.entrysize = sizeof(struct CheckpointerSlotMapping); - ctl.hcxt = CurrentMemoryContext; - - htab = hash_create("CompactCheckpointerRequestQueue", - CheckpointerShmem->num_requests, - &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + htab = hash_make(struct CheckpointerSlotMapping, request, + "CompactCheckpointerRequestQueue", + CheckpointerShmem->num_requests); /* * The basic idea here is that a request can be skipped if it's followed diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c index a4eb3962cb1f..eadf5b67eb5b 100644 --- a/src/backend/replication/logical/applyparallelworker.c +++ b/src/backend/replication/logical/applyparallelworker.c @@ -484,16 +484,9 @@ pa_allocate_worker(TransactionId xid) /* First time through, initialize parallel apply worker state hashtable. */ if (!ParallelApplyTxnHash) { - HASHCTL ctl; - - MemSet(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(TransactionId); - ctl.entrysize = sizeof(ParallelApplyWorkerEntry); - ctl.hcxt = ApplyContext; - - ParallelApplyTxnHash = hash_create("logical replication parallel apply workers hash", - 16, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + ParallelApplyTxnHash = hash_make_cxt(ParallelApplyWorkerEntry, xid, + "logical replication parallel apply workers hash", + 16, ApplyContext); } /* Create an entry for the requested transaction. 
*/ diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index 10b3d0d9b823..c4af85d74f1a 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -104,8 +104,6 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid) static void logicalrep_relmap_init(void) { - HASHCTL ctl; - if (!LogicalRepRelMapContext) LogicalRepRelMapContext = AllocSetContextCreate(CacheMemoryContext, @@ -113,12 +111,9 @@ logicalrep_relmap_init(void) ALLOCSET_DEFAULT_SIZES); /* Initialize the relation hash table. */ - ctl.keysize = sizeof(LogicalRepRelId); - ctl.entrysize = sizeof(LogicalRepRelMapEntry); - ctl.hcxt = LogicalRepRelMapContext; - - LogicalRepRelMap = hash_create("logicalrep relation map cache", 128, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + LogicalRepRelMap = hash_make_cxt(LogicalRepRelMapEntry, remoterel.remoteid, + "logicalrep relation map cache", 128, + LogicalRepRelMapContext); /* Watch for invalidation events. */ CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb, @@ -610,8 +605,6 @@ logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel) static void logicalrep_partmap_init(void) { - HASHCTL ctl; - if (!LogicalRepPartMapContext) LogicalRepPartMapContext = AllocSetContextCreate(CacheMemoryContext, @@ -619,12 +612,9 @@ logicalrep_partmap_init(void) ALLOCSET_DEFAULT_SIZES); /* Initialize the relation hash table. */ - ctl.keysize = sizeof(Oid); /* partition OID */ - ctl.entrysize = sizeof(LogicalRepPartMapEntry); - ctl.hcxt = LogicalRepPartMapContext; - - LogicalRepPartMap = hash_create("logicalrep partition map cache", 64, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + LogicalRepPartMap = hash_make_cxt(LogicalRepPartMapEntry, partoid, + "logicalrep partition map cache", 64, + LogicalRepPartMapContext); /* Watch for invalidation events. 
*/ CacheRegisterRelcacheCallback(logicalrep_partmap_invalidate_cb, diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index eb6a84554b78..e09516d3d8b8 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -324,7 +324,6 @@ ReorderBuffer * ReorderBufferAllocate(void) { ReorderBuffer *buffer; - HASHCTL hash_ctl; MemoryContext new_ctx; Assert(MyReplicationSlot != NULL); @@ -337,8 +336,6 @@ ReorderBufferAllocate(void) buffer = (ReorderBuffer *) MemoryContextAlloc(new_ctx, sizeof(ReorderBuffer)); - memset(&hash_ctl, 0, sizeof(hash_ctl)); - buffer->context = new_ctx; buffer->change_context = SlabContextCreate(new_ctx, @@ -367,12 +364,8 @@ ReorderBufferAllocate(void) SLAB_DEFAULT_BLOCK_SIZE, SLAB_DEFAULT_BLOCK_SIZE); - hash_ctl.keysize = sizeof(TransactionId); - hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt); - hash_ctl.hcxt = buffer->context; - - buffer->by_txn = hash_create("ReorderBufferByXid", 1000, &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + buffer->by_txn = hash_make_cxt(ReorderBufferTXNByIdEnt, xid, + "ReorderBufferByXid", 1000, buffer->context); buffer->by_txn_last_xid = InvalidTransactionId; buffer->by_txn_last_txn = NULL; @@ -1836,22 +1829,17 @@ static void ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn) { dlist_iter iter; - HASHCTL hash_ctl; if (!rbtxn_has_catalog_changes(txn) || dlist_is_empty(&txn->tuplecids)) return; - hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey); - hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt); - hash_ctl.hcxt = rb->context; - /* * create the hash with the exact number of to-be-stored tuplecids from * the start */ txn->tuplecid_hash = - hash_create("ReorderBufferTupleCid", txn->ntuplecids, &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + hash_make_cxt(ReorderBufferTupleCidEnt, key, + "ReorderBufferTupleCid", txn->ntuplecids, rb->context); dlist_foreach(iter, 
&txn->tuplecids) { @@ -4977,15 +4965,10 @@ StartupReorderBuffer(void) static void ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn) { - HASHCTL hash_ctl; - Assert(txn->toast_hash == NULL); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(ReorderBufferToastEnt); - hash_ctl.hcxt = rb->context; - txn->toast_hash = hash_create("ReorderBufferToastHash", 5, &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + txn->toast_hash = hash_make_cxt(ReorderBufferToastEnt, chunk_id, + "ReorderBufferToastHash", 5, rb->context); } /* diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 6bb0cbeedad8..276b746b4dbe 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -117,6 +117,7 @@ #include "utils/array.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/rls.h" #include "utils/snapmgr.h" #include "utils/syscache.h" @@ -390,12 +391,9 @@ ProcessSyncingTablesForApply(XLogRecPtr current_lsn) */ if (table_states_not_ready != NIL && !last_start_times) { - HASHCTL ctl; - - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(struct tablesync_start_time_mapping); - last_start_times = hash_create("Logical replication table sync worker start times", - 256, &ctl, HASH_ELEM | HASH_BLOBS); + last_start_times = hash_make_cxt(struct tablesync_start_time_mapping, relid, + "Logical replication table sync worker start times", + 256, TopMemoryContext); } /* diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 942e1abdb584..6dea24ff0a65 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -1969,7 +1969,6 @@ pgoutput_stream_prepare_txn(LogicalDecodingContext *ctx, static void init_rel_sync_cache(MemoryContext cachectx) { - HASHCTL ctl; static bool relation_callbacks_registered = false; /* Nothing 
to do if hash table already exists */ @@ -1977,13 +1976,9 @@ init_rel_sync_cache(MemoryContext cachectx) return; /* Make a new hash table for the cache */ - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(RelationSyncEntry); - ctl.hcxt = cachectx; - - RelationSyncCache = hash_create("logical replication output relation cache", - 128, &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); + RelationSyncCache = hash_make_cxt(RelationSyncEntry, relid, + "logical replication output relation cache", + 128, cachectx); Assert(RelationSyncCache != NULL); diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c index 9d256559bab9..bfcec855ff64 100644 --- a/src/backend/storage/buffer/buf_table.c +++ b/src/backend/storage/buffer/buf_table.c @@ -50,19 +50,16 @@ BufTableShmemSize(int size) void InitBufTable(int size) { - HASHCTL info; + HASHOPTS opts = {0}; /* assume no locking is needed yet */ /* BufferTag maps to Buffer */ - info.keysize = sizeof(BufferTag); - info.entrysize = sizeof(BufferLookupEnt); - info.num_partitions = NUM_BUFFER_PARTITIONS; - - SharedBufHash = ShmemInitHash("Shared Buffer Lookup Table", - size, size, - &info, - HASH_ELEM | HASH_BLOBS | HASH_PARTITION | HASH_FIXED_SIZE); + opts.num_partitions = NUM_BUFFER_PARTITIONS; + opts.fixed_size = true; + SharedBufHash = shmem_hash_make_ext(BufferLookupEnt, key, + "Shared Buffer Lookup Table", + size, size, &opts); } /* diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index ce52d6ca81f1..781738956af4 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -62,6 +62,7 @@ #include "storage/smgr.h" #include "storage/standby.h" #include "utils/memdebug.h" +#include "utils/memutils.h" #include "utils/ps_status.h" #include "utils/rel.h" #include "utils/resowner.h" @@ -4006,8 +4007,6 @@ AtEOXact_Buffers(bool isCommit) void InitBufferManagerAccess(void) { - HASHCTL hash_ctl; - /* * An advisory limit on the number 
of pins each backend should hold, based * on shared_buffers and the maximum number of connections possible. @@ -4019,11 +4018,9 @@ InitBufferManagerAccess(void) memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray)); - hash_ctl.keysize = sizeof(Buffer); - hash_ctl.entrysize = sizeof(PrivateRefCountEntry); - - PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl, - HASH_ELEM | HASH_BLOBS); + PrivateRefCountHash = hash_make_cxt(PrivateRefCountEntry, buffer, + "PrivateRefCount", 100, + TopMemoryContext); /* * AtProcExit_Buffers needs LWLock access, and thereby has to be called at diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 15aac7d1c9fe..a66f2975a475 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -728,7 +728,6 @@ static void InitLocalBuffers(void) { int nbufs = num_temp_buffers; - HASHCTL info; int i; /* @@ -779,13 +778,9 @@ InitLocalBuffers(void) } /* Create the lookup hash table */ - info.keysize = sizeof(BufferTag); - info.entrysize = sizeof(LocalBufferLookupEnt); - - LocalBufHash = hash_create("Local Buffer Lookup Table", - nbufs, - &info, - HASH_ELEM | HASH_BLOBS); + LocalBufHash = hash_make_cxt(LocalBufferLookupEnt, key, + "Local Buffer Lookup Table", nbufs, + TopMemoryContext); if (!LocalBufHash) elog(ERROR, "could not initialize local buffer hash table"); diff --git a/src/backend/storage/file/reinit.c b/src/backend/storage/file/reinit.c index 5c8275cf5365..a943982715b5 100644 --- a/src/backend/storage/file/reinit.c +++ b/src/backend/storage/file/reinit.c @@ -175,7 +175,6 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op) if ((op & UNLOGGED_RELATION_CLEANUP) != 0) { HTAB *hash; - HASHCTL ctl; /* * It's possible that someone could create a ton of unlogged relations @@ -184,11 +183,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op) * need to be reset. 
Otherwise, this cleanup operation would be * O(n^2). */ - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(unlogged_relation_entry); - ctl.hcxt = CurrentMemoryContext; - hash = hash_create("unlogged relation OIDs", 32, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + hash = hash_make(unlogged_relation_entry, relnumber, + "unlogged relation OIDs", 32); /* Scan the directory. */ dbspace_dir = AllocateDir(dbspacedirname); diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index ee3408df301b..2d4c518dec2c 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -284,8 +284,6 @@ ShmemAddrIsValid(const void *addr) void InitShmemIndex(void) { - HASHCTL info; - /* * Create the shared memory shmem index. * @@ -294,13 +292,8 @@ InitShmemIndex(void) * initializing the ShmemIndex itself. The special "ShmemIndex" hash * table name will tell ShmemInitStruct to fake it. */ - info.keysize = SHMEM_INDEX_KEYSIZE; - info.entrysize = sizeof(ShmemIndexEnt); - - ShmemIndex = ShmemInitHash("ShmemIndex", - SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE, - &info, - HASH_ELEM | HASH_STRINGS); + ShmemIndex = shmem_hash_make(ShmemIndexEnt, key, "ShmemIndex", + SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE); } /* @@ -369,6 +362,28 @@ ShmemInitHash(const char *name, /* table string name for shmem index */ return hash_create(name, init_size, infoP, hash_flags); } +/* + * Implementation function for shmem_hash_make macros. + * + * Creates a shared memory hash table with simplified parameters. + * Pass NULL for opts to use all defaults. 
+ */ +HTAB * +shmem_hash_make_impl(const char *name, int64 init_size, int64 max_size, + Size keysize, Size entrysize, bool string_key, + const HASHOPTS *opts) +{ + HASHCTL ctl; + int flags; + + /* Shared memory hash tables use ShmemAllocNoError, not a custom allocator */ + Assert(opts == NULL || opts->alloc == NULL); + + hash_opts_init(&ctl, &flags, keysize, entrysize, string_key, opts); + + return ShmemInitHash(name, init_size, max_size, &ctl, flags); +} + /* * ShmemInitStruct -- Create/attach to a structure in shared memory. * diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index fc45d72c79bc..7b9f7e218aa1 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -32,6 +32,7 @@ #include "storage/standby.h" #include "utils/hsearch.h" #include "utils/injection_point.h" +#include "utils/memutils.h" #include "utils/ps_status.h" #include "utils/timeout.h" #include "utils/timestamp.h" @@ -95,7 +96,6 @@ void InitRecoveryTransactionEnvironment(void) { VirtualTransactionId vxid; - HASHCTL hash_ctl; Assert(RecoveryLockHash == NULL); /* don't run this twice */ @@ -103,18 +103,12 @@ InitRecoveryTransactionEnvironment(void) * Initialize the hash tables for tracking the locks held by each * transaction. 
*/ - hash_ctl.keysize = sizeof(xl_standby_lock); - hash_ctl.entrysize = sizeof(RecoveryLockEntry); - RecoveryLockHash = hash_create("RecoveryLockHash", - 64, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); - hash_ctl.keysize = sizeof(TransactionId); - hash_ctl.entrysize = sizeof(RecoveryLockXidEntry); - RecoveryLockXidHash = hash_create("RecoveryLockXidHash", - 64, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); + RecoveryLockHash = hash_make_cxt(RecoveryLockEntry, key, + "RecoveryLockHash", 64, + TopMemoryContext); + RecoveryLockXidHash = hash_make_cxt(RecoveryLockXidEntry, xid, + "RecoveryLockXidHash", 64, + TopMemoryContext); /* * Initialize shared invalidation management for Startup process, being diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 9cb78ead105a..e5ea54e78247 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -443,7 +443,7 @@ static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, void LockManagerShmemInit(void) { - HASHCTL info; + HASHOPTS opts; int64 init_table_size, max_table_size; bool found; @@ -459,15 +459,11 @@ LockManagerShmemInit(void) * Allocate hash table for LOCK structs. This stores per-locked-object * information. */ - info.keysize = sizeof(LOCKTAG); - info.entrysize = sizeof(LOCK); - info.num_partitions = NUM_LOCK_PARTITIONS; - - LockMethodLockHash = ShmemInitHash("LOCK hash", - init_table_size, - max_table_size, - &info, - HASH_ELEM | HASH_BLOBS | HASH_PARTITION); + MemSet(&opts, 0, sizeof(opts)); + opts.num_partitions = NUM_LOCK_PARTITIONS; + LockMethodLockHash = shmem_hash_make_ext(LOCK, tag, "LOCK hash", + init_table_size, max_table_size, + &opts); /* Assume an average of 2 holders per lock */ max_table_size *= 2; @@ -477,16 +473,12 @@ LockManagerShmemInit(void) * Allocate hash table for PROCLOCK structs. This stores * per-lock-per-holder information. 
*/ - info.keysize = sizeof(PROCLOCKTAG); - info.entrysize = sizeof(PROCLOCK); - info.hash = proclock_hash; - info.num_partitions = NUM_LOCK_PARTITIONS; - - LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash", - init_table_size, - max_table_size, - &info, - HASH_ELEM | HASH_FUNCTION | HASH_PARTITION); + MemSet(&opts, 0, sizeof(opts)); + opts.hash = proclock_hash; + opts.num_partitions = NUM_LOCK_PARTITIONS; + LockMethodProcLockHash = shmem_hash_make_ext(PROCLOCK, tag, "PROCLOCK hash", + init_table_size, max_table_size, + &opts); /* * Allocate fast-path structures. @@ -508,15 +500,9 @@ InitLockManagerAccess(void) * Allocate non-shared hash table for LOCALLOCK structs. This stores lock * counts and resource owner information. */ - HASHCTL info; - - info.keysize = sizeof(LOCALLOCKTAG); - info.entrysize = sizeof(LOCALLOCK); - - LockMethodLocalHash = hash_create("LOCALLOCK hash", - 16, - &info, - HASH_ELEM | HASH_BLOBS); + LockMethodLocalHash = hash_make_cxt(LOCALLOCK, tag, + "LOCALLOCK hash", 16, + TopMemoryContext); } @@ -3396,20 +3382,13 @@ CheckForSessionAndXactLocks(void) bool xactLock; /* is any lockmode held at xact level? 
*/ } PerLockTagEntry; - HASHCTL hash_ctl; HTAB *lockhtab; HASH_SEQ_STATUS status; LOCALLOCK *locallock; /* Create a local hash table keyed by LOCKTAG only */ - hash_ctl.keysize = sizeof(LOCKTAG); - hash_ctl.entrysize = sizeof(PerLockTagEntry); - hash_ctl.hcxt = CurrentMemoryContext; - - lockhtab = hash_create("CheckForSessionAndXactLocks table", - 256, /* arbitrary initial size */ - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + lockhtab = hash_make(PerLockTagEntry, lock, + "CheckForSessionAndXactLocks table", 256); /* Scan local lock table to find entries for each LOCKTAG */ hash_seq_init(&status, LockMethodLocalHash); diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 255cfa8fa95e..1f749beadcee 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -285,7 +285,6 @@ static lwlock_stats * get_lwlock_stats_entry(LWLock *lock); static void init_lwlock_stats(void) { - HASHCTL ctl; static MemoryContext lwlock_stats_cxt = NULL; static bool exit_registered = false; @@ -305,11 +304,8 @@ init_lwlock_stats(void) ALLOCSET_DEFAULT_SIZES); MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true); - ctl.keysize = sizeof(lwlock_stats_key); - ctl.entrysize = sizeof(lwlock_stats); - ctl.hcxt = lwlock_stats_cxt; - lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + lwlock_stats_htab = hash_make_cxt(lwlock_stats, key, + "lwlock stats", 16384, lwlock_stats_cxt); if (!exit_registered) { on_shmem_exit(print_lwlock_stats, 0); diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index f12f8f77aade..c2d850327161 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -212,6 +212,7 @@ #include "storage/proc.h" #include "storage/procarray.h" #include "utils/guc_hooks.h" +#include "utils/memutils.h" #include "utils/rel.h" #include "utils/snapmgr.h" @@ -1144,7 +1145,7 @@ 
CheckPointPredicate(void) void PredicateLockShmemInit(void) { - HASHCTL info; + HASHOPTS opts; int64 max_table_size; Size requestSize; bool found; @@ -1163,16 +1164,13 @@ PredicateLockShmemInit(void) * Allocate hash table for PREDICATELOCKTARGET structs. This stores * per-predicate-lock-target information. */ - info.keysize = sizeof(PREDICATELOCKTARGETTAG); - info.entrysize = sizeof(PREDICATELOCKTARGET); - info.num_partitions = NUM_PREDICATELOCK_PARTITIONS; - - PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash", - max_table_size, - max_table_size, - &info, - HASH_ELEM | HASH_BLOBS | - HASH_PARTITION | HASH_FIXED_SIZE); + MemSet(&opts, 0, sizeof(opts)); + opts.num_partitions = NUM_PREDICATELOCK_PARTITIONS; + opts.fixed_size = true; + PredicateLockTargetHash = shmem_hash_make_ext(PREDICATELOCKTARGET, tag, + "PREDICATELOCKTARGET hash", + max_table_size, max_table_size, + &opts); /* * Reserve a dummy entry in the hash table; we use it to make sure there's @@ -1195,20 +1193,17 @@ PredicateLockShmemInit(void) * Allocate hash table for PREDICATELOCK structs. This stores per * xact-lock-of-a-target information. */ - info.keysize = sizeof(PREDICATELOCKTAG); - info.entrysize = sizeof(PREDICATELOCK); - info.hash = predicatelock_hash; - info.num_partitions = NUM_PREDICATELOCK_PARTITIONS; - /* Assume an average of 2 xacts per target */ max_table_size *= 2; - PredicateLockHash = ShmemInitHash("PREDICATELOCK hash", - max_table_size, - max_table_size, - &info, - HASH_ELEM | HASH_FUNCTION | - HASH_PARTITION | HASH_FIXED_SIZE); + MemSet(&opts, 0, sizeof(opts)); + opts.hash = predicatelock_hash; + opts.num_partitions = NUM_PREDICATELOCK_PARTITIONS; + opts.fixed_size = true; + PredicateLockHash = shmem_hash_make_ext(PREDICATELOCK, tag, + "PREDICATELOCK hash", + max_table_size, max_table_size, + &opts); /* * Compute size for serializable transaction hashtable. 
Note these @@ -1282,15 +1277,12 @@ PredicateLockShmemInit(void) * Allocate hash table for SERIALIZABLEXID structs. This stores per-xid * information for serializable transactions which have accessed data. */ - info.keysize = sizeof(SERIALIZABLEXIDTAG); - info.entrysize = sizeof(SERIALIZABLEXID); - - SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash", - max_table_size, - max_table_size, - &info, - HASH_ELEM | HASH_BLOBS | - HASH_FIXED_SIZE); + MemSet(&opts, 0, sizeof(opts)); + opts.fixed_size = true; + SerializableXidHash = shmem_hash_make_ext(SERIALIZABLEXID, tag, + "SERIALIZABLEXID hash", + max_table_size, max_table_size, + &opts); /* * Allocate space for tracking rw-conflicts in lists attached to the @@ -1939,16 +1931,12 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot, static void CreateLocalPredicateLockHash(void) { - HASHCTL hash_ctl; - /* Initialize the backend-local hash table of parent locks */ Assert(LocalPredicateLockHash == NULL); - hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG); - hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK); - LocalPredicateLockHash = hash_create("Local predicate lock", - max_predicate_locks_per_xact, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); + LocalPredicateLockHash = hash_make_cxt(LOCALPREDICATELOCK, tag, + "Local predicate lock", + max_predicate_locks_per_xact, + TopMemoryContext); } /* diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c index bce37a36d51b..d4fa6a144f15 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c @@ -73,6 +73,7 @@ #include "storage/smgr.h" #include "utils/hsearch.h" #include "utils/inval.h" +#include "utils/memutils.h" /* @@ -250,12 +251,9 @@ smgropen(RelFileLocator rlocator, ProcNumber backend) if (SMgrRelationHash == NULL) { /* First time through: initialize the hash table */ - HASHCTL ctl; - - ctl.keysize = sizeof(RelFileLocatorBackend); - ctl.entrysize = sizeof(SMgrRelationData); - SMgrRelationHash = hash_create("smgr 
relation table", 400, - &ctl, HASH_ELEM | HASH_BLOBS); + SMgrRelationHash = hash_make_cxt(SMgrRelationData, smgr_rlocator, + "smgr relation table", 400, + TopMemoryContext); dlist_init(&unpinned_relns); } diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c index fc16db90133b..42b7fc112881 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c @@ -130,8 +130,6 @@ InitSync(void) */ if (!IsUnderPostmaster || AmCheckpointerProcess()) { - HASHCTL hash_ctl; - /* * XXX: The checkpointer needs to add entries to the pending ops table * when absorbing fsync requests. That is done within a critical @@ -146,13 +144,8 @@ InitSync(void) ALLOCSET_DEFAULT_SIZES); MemoryContextAllowInCriticalSection(pendingOpsCxt, true); - hash_ctl.keysize = sizeof(FileTag); - hash_ctl.entrysize = sizeof(PendingFsyncEntry); - hash_ctl.hcxt = pendingOpsCxt; - pendingOps = hash_create("Pending Ops Table", - 100L, - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + pendingOps = hash_make_cxt(PendingFsyncEntry, tag, + "Pending Ops Table", 100L, pendingOpsCxt); pendingUnlinks = NIL; } } diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c index 93aab00a3cac..a9bc6901a129 100644 --- a/src/backend/tsearch/ts_typanalyze.c +++ b/src/backend/tsearch/ts_typanalyze.c @@ -149,7 +149,6 @@ compute_tsvector_stats(VacAttrStats *stats, /* This is D from the LC algorithm. */ HTAB *lexemes_tab; - HASHCTL hash_ctl; HASH_SEQ_STATUS scan_status; /* This is the current bucket number from the LC algorithm */ @@ -180,15 +179,9 @@ compute_tsvector_stats(VacAttrStats *stats, * worry about overflowing the initial size. Also we don't need to pay any * attention to locking and memory management. 
*/ - hash_ctl.keysize = sizeof(LexemeHashKey); - hash_ctl.entrysize = sizeof(TrackItem); - hash_ctl.hash = lexeme_hash; - hash_ctl.match = lexeme_match; - hash_ctl.hcxt = CurrentMemoryContext; - lexemes_tab = hash_create("Analyzed lexemes table", - num_mcelem, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + lexemes_tab = hash_make_fn(TrackItem, key, + "Analyzed lexemes table", num_mcelem, + lexeme_hash, lexeme_match); /* Initialize counters. */ b_current = 1; diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c index 96d61f77f6ef..35aa0208fc93 100644 --- a/src/backend/utils/activity/wait_event.c +++ b/src/backend/utils/activity/wait_event.c @@ -119,7 +119,6 @@ void WaitEventCustomShmemInit(void) { bool found; - HASHCTL info; WaitEventCustomCounter = (WaitEventCustomCounterData *) ShmemInitStruct("WaitEventCustomCounterData", @@ -133,24 +132,18 @@ WaitEventCustomShmemInit(void) } /* initialize or attach the hash tables to store custom wait events */ - info.keysize = sizeof(uint32); - info.entrysize = sizeof(WaitEventCustomEntryByInfo); WaitEventCustomHashByInfo = - ShmemInitHash("WaitEventCustom hash by wait event information", - WAIT_EVENT_CUSTOM_HASH_INIT_SIZE, - WAIT_EVENT_CUSTOM_HASH_MAX_SIZE, - &info, - HASH_ELEM | HASH_BLOBS); + shmem_hash_make(WaitEventCustomEntryByInfo, wait_event_info, + "WaitEventCustom hash by wait event information", + WAIT_EVENT_CUSTOM_HASH_INIT_SIZE, + WAIT_EVENT_CUSTOM_HASH_MAX_SIZE); /* key is a NULL-terminated string */ - info.keysize = sizeof(char[NAMEDATALEN]); - info.entrysize = sizeof(WaitEventCustomEntryByName); WaitEventCustomHashByName = - ShmemInitHash("WaitEventCustom hash by name", - WAIT_EVENT_CUSTOM_HASH_INIT_SIZE, - WAIT_EVENT_CUSTOM_HASH_MAX_SIZE, - &info, - HASH_ELEM | HASH_STRINGS); + shmem_hash_make(WaitEventCustomEntryByName, wait_event_name, + "WaitEventCustom hash by name", + WAIT_EVENT_CUSTOM_HASH_INIT_SIZE, + WAIT_EVENT_CUSTOM_HASH_MAX_SIZE); 
} /* diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c index 560b27f3ca7d..cc310c044b35 100644 --- a/src/backend/utils/adt/array_typanalyze.c +++ b/src/backend/utils/adt/array_typanalyze.c @@ -223,7 +223,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, /* This is D from the LC algorithm. */ HTAB *elements_tab; - HASHCTL elem_hash_ctl; HASH_SEQ_STATUS scan_status; /* This is the current bucket number from the LC algorithm */ @@ -236,7 +235,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, TrackItem *item; int slot_idx; HTAB *count_tab; - HASHCTL count_hash_ctl; DECountItem *count_item; extra_data = (ArrayAnalyzeExtraData *) stats->extra_data; @@ -276,24 +274,13 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, * worry about overflowing the initial size. Also we don't need to pay any * attention to locking and memory management. */ - elem_hash_ctl.keysize = sizeof(Datum); - elem_hash_ctl.entrysize = sizeof(TrackItem); - elem_hash_ctl.hash = element_hash; - elem_hash_ctl.match = element_match; - elem_hash_ctl.hcxt = CurrentMemoryContext; - elements_tab = hash_create("Analyzed elements table", - num_mcelem, - &elem_hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + elements_tab = hash_make_fn(TrackItem, key, + "Analyzed elements table", num_mcelem, + element_hash, element_match); /* hashtable for array distinct elements counts */ - count_hash_ctl.keysize = sizeof(int); - count_hash_ctl.entrysize = sizeof(DECountItem); - count_hash_ctl.hcxt = CurrentMemoryContext; - count_tab = hash_create("Array distinct element count table", - 64, - &count_hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + count_tab = hash_make(DECountItem, count, + "Array distinct element count table", 64); /* Initialize counters. 
*/ b_current = 1; diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index 06dd62f00080..56aa88e495ec 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -933,19 +933,8 @@ json_unique_hash_match(const void *key1, const void *key2, Size keysize) static void json_unique_check_init(JsonUniqueCheckState *cxt) { - HASHCTL ctl; - - memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(JsonUniqueHashEntry); - ctl.entrysize = sizeof(JsonUniqueHashEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = json_unique_hash; - ctl.match = json_unique_hash_match; - - *cxt = hash_create("json object hashtable", - 32, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE); + *cxt = hashset_make_fn(JsonUniqueHashEntry, "json object hashtable", 32, + json_unique_hash, json_unique_hash_match); } static void diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 22de18bc5b9b..b369a9e3b162 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -3808,18 +3808,12 @@ static HTAB * get_json_object_as_hash(const char *json, int len, const char *funcname, Node *escontext) { - HASHCTL ctl; HTAB *tab; JHashState *state; JsonSemAction *sem; - ctl.keysize = NAMEDATALEN; - ctl.entrysize = sizeof(JsonHashEntry); - ctl.hcxt = CurrentMemoryContext; - tab = hash_create("json object hashtable", - 100, - &ctl, - HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); + tab = hash_make(JsonHashEntry, fname, + "json object hashtable", 100); state = palloc0(sizeof(JHashState)); sem = palloc0(sizeof(JsonSemAction)); @@ -4213,7 +4207,6 @@ populate_recordset_object_start(void *state) { PopulateRecordsetState *_state = (PopulateRecordsetState *) state; int lex_level = _state->lex->lex_level; - HASHCTL ctl; /* Reject object at top level: we must have an array at level 0 */ if (lex_level == 0) @@ -4227,13 +4220,8 @@ populate_recordset_object_start(void *state) return JSON_SUCCESS; /* Object at 
level 1: set up a new hash table for this object */ - ctl.keysize = NAMEDATALEN; - ctl.entrysize = sizeof(JsonHashEntry); - ctl.hcxt = CurrentMemoryContext; - _state->json_hash = hash_create("json object hashtable", - 100, - &ctl, - HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); + _state->json_hash = hash_make(JsonHashEntry, fname, + "json object hashtable", 100); return JSON_SUCCESS; } diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c index fe6dce9cba3e..84b43f5c4e0a 100644 --- a/src/backend/utils/adt/mcxtfuncs.c +++ b/src/backend/utils/adt/mcxtfuncs.c @@ -185,17 +185,10 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS) ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; int context_id; List *contexts; - HASHCTL ctl; HTAB *context_id_lookup; - ctl.keysize = sizeof(MemoryContext); - ctl.entrysize = sizeof(MemoryContextId); - ctl.hcxt = CurrentMemoryContext; - - context_id_lookup = hash_create("pg_get_backend_memory_contexts", - 256, - &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + context_id_lookup = hash_make(MemoryContextId, context, + "pg_get_backend_memory_contexts", 256); InitMaterializedSRF(fcinfo, 0); diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 059fc5ebf601..e0f05f2cc4af 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -2857,30 +2857,25 @@ ri_NullCheck(TupleDesc tupDesc, static void ri_InitHashTables(void) { - HASHCTL ctl; - - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(RI_ConstraintInfo); - ri_constraint_cache = hash_create("RI constraint cache", - RI_INIT_CONSTRAINTHASHSIZE, - &ctl, HASH_ELEM | HASH_BLOBS); + ri_constraint_cache = hash_make_cxt(RI_ConstraintInfo, constraint_id, + "RI constraint cache", + RI_INIT_CONSTRAINTHASHSIZE, + TopMemoryContext); /* Arrange to flush cache on pg_constraint changes */ CacheRegisterSyscacheCallback(CONSTROID, InvalidateConstraintCacheCallBack, (Datum) 0); - ctl.keysize = 
sizeof(RI_QueryKey); - ctl.entrysize = sizeof(RI_QueryHashEntry); - ri_query_cache = hash_create("RI query cache", - RI_INIT_QUERYHASHSIZE, - &ctl, HASH_ELEM | HASH_BLOBS); - - ctl.keysize = sizeof(RI_CompareKey); - ctl.entrysize = sizeof(RI_CompareHashEntry); - ri_compare_cache = hash_create("RI compare cache", + ri_query_cache = hash_make_cxt(RI_QueryHashEntry, key, + "RI query cache", RI_INIT_QUERYHASHSIZE, - &ctl, HASH_ELEM | HASH_BLOBS); + TopMemoryContext); + + ri_compare_cache = hash_make_cxt(RI_CompareHashEntry, key, + "RI compare cache", + RI_INIT_QUERYHASHSIZE, + TopMemoryContext); } diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 6cf90be40bb5..98d331c290fb 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -3887,7 +3887,6 @@ static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, Bitmapset *rels_used) { - HASHCTL hash_ctl; HTAB *names_hash; NameHashEntry *hentry; bool found; @@ -3903,13 +3902,9 @@ set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, * We use a hash table to hold known names, so that this process is O(N) * not O(N^2) for N names. */ - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = sizeof(NameHashEntry); - hash_ctl.hcxt = CurrentMemoryContext; - names_hash = hash_create("set_rtable_names names", - list_length(dpns->rtable), - &hash_ctl, - HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); + names_hash = hash_make(NameHashEntry, name, + "set_rtable_names names", + list_length(dpns->rtable)); /* Preload the hash table with names appearing in parent_namespaces */ foreach(lc, parent_namespaces) @@ -4980,7 +4975,6 @@ expand_colnames_array_to(deparse_columns *colinfo, int n) static void build_colinfo_names_hash(deparse_columns *colinfo) { - HASHCTL hash_ctl; int i; ListCell *lc; @@ -4996,13 +4990,10 @@ build_colinfo_names_hash(deparse_columns *colinfo) * Set up the hash table. The entries are just strings with no other * payload. 
*/ - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = NAMEDATALEN; - hash_ctl.hcxt = CurrentMemoryContext; - colinfo->names_hash = hash_create("deparse_columns names", - colinfo->num_cols + colinfo->num_new_cols, - &hash_ctl, - HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); + colinfo->names_hash = + hashset_make_cxt(NameData, "deparse_columns names", + colinfo->num_cols + colinfo->num_new_cols, + CurrentMemoryContext); /* * Preload the hash table with any names already present (these would have diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c index 45d1e2be007b..caae39e3bb14 100644 --- a/src/backend/utils/cache/attoptcache.c +++ b/src/backend/utils/cache/attoptcache.c @@ -21,6 +21,7 @@ #include "utils/catcache.h" #include "utils/hsearch.h" #include "utils/inval.h" +#include "utils/memutils.h" #include "utils/syscache.h" #include "varatt.h" @@ -96,22 +97,15 @@ relatt_cache_syshash(const void *key, Size keysize) static void InitializeAttoptCache(void) { - HASHCTL ctl; - - /* Initialize the hash table. */ - ctl.keysize = sizeof(AttoptCacheKey); - ctl.entrysize = sizeof(AttoptCacheEntry); - /* * AttoptCacheEntry takes hash value from the system cache. For * AttoptCacheHash we use the same hash in order to speedup search by hash * value. This is used by hash_seq_init_with_hash_value(). */ - ctl.hash = relatt_cache_syshash; - - AttoptCacheHash = - hash_create("Attopt cache", 256, &ctl, - HASH_ELEM | HASH_FUNCTION); + AttoptCacheHash = hash_make_fn_cxt(AttoptCacheEntry, key, + "Attopt cache", 256, + relatt_cache_syshash, NULL, + TopMemoryContext); /* Make sure we've initialized CacheMemoryContext. 
*/ if (!CacheMemoryContext) diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c index 76ba2db53906..f5ce873f43ca 100644 --- a/src/backend/utils/cache/evtcache.c +++ b/src/backend/utils/cache/evtcache.c @@ -76,7 +76,6 @@ EventCacheLookup(EventTriggerEvent event) static void BuildEventTriggerCache(void) { - HASHCTL ctl; HTAB *cache; Relation rel; Relation irel; @@ -113,11 +112,9 @@ BuildEventTriggerCache(void) EventTriggerCacheState = ETCS_REBUILD_STARTED; /* Create new hash table. */ - ctl.keysize = sizeof(EventTriggerEvent); - ctl.entrysize = sizeof(EventTriggerCacheEntry); - ctl.hcxt = EventTriggerCacheContext; - cache = hash_create("EventTriggerCacheHash", 32, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + cache = hash_make_cxt(EventTriggerCacheEntry, event, + "EventTriggerCacheHash", 32, + EventTriggerCacheContext); /* * Prepare to scan pg_event_trigger in name order. diff --git a/src/backend/utils/cache/funccache.c b/src/backend/utils/cache/funccache.c index afc048a051ea..34438a9f93b5 100644 --- a/src/backend/utils/cache/funccache.c +++ b/src/backend/utils/cache/funccache.c @@ -58,19 +58,13 @@ static int cfunc_match(const void *key1, const void *key2, Size keysize); static void cfunc_hashtable_init(void) { - HASHCTL ctl; - /* don't allow double-initialization */ Assert(cfunc_hashtable == NULL); - ctl.keysize = sizeof(CachedFunctionHashKey); - ctl.entrysize = sizeof(CachedFunctionHashEntry); - ctl.hash = cfunc_hash; - ctl.match = cfunc_match; - cfunc_hashtable = hash_create("Cached function hash", - FUNCS_PER_USER, - &ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE); + cfunc_hashtable = hash_make_fn_cxt(CachedFunctionHashEntry, key, + "Cached function hash", FUNCS_PER_USER, + cfunc_hash, cfunc_match, + TopMemoryContext); } /* diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 915d0bc90842..b76d6bbd5c95 100644 --- a/src/backend/utils/cache/relcache.c +++ 
b/src/backend/utils/cache/relcache.c @@ -1677,17 +1677,14 @@ LookupOpclassInfo(Oid operatorClassOid, if (OpClassCache == NULL) { - /* First time through: initialize the opclass cache */ - HASHCTL ctl; - /* Also make sure CacheMemoryContext exists */ if (!CacheMemoryContext) CreateCacheMemoryContext(); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(OpClassCacheEnt); - OpClassCache = hash_create("Operator class cache", 64, - &ctl, HASH_ELEM | HASH_BLOBS); + /* First time through: initialize the opclass cache */ + OpClassCache = hash_make_cxt(OpClassCacheEnt, opclassoid, + "Operator class cache", 64, + TopMemoryContext); } opcentry = (OpClassCacheEnt *) hash_search(OpClassCache, @@ -4001,7 +3998,6 @@ RelationAssumeNewRelfilelocator(Relation relation) void RelationCacheInitialize(void) { - HASHCTL ctl; int allocsize; /* @@ -4013,10 +4009,9 @@ RelationCacheInitialize(void) /* * create hashtable that indexes the relcache */ - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(RelIdCacheEnt); - RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE, - &ctl, HASH_ELEM | HASH_BLOBS); + RelationIdCache = hash_make_cxt(RelIdCacheEnt, reloid, + "Relcache by OID", INITRELCACHESIZE, + TopMemoryContext); /* * reserve enough in_progress_list slots for many cases diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c index 0b6f9cf3fa19..69aba463eaea 100644 --- a/src/backend/utils/cache/relfilenumbermap.c +++ b/src/backend/utils/cache/relfilenumbermap.c @@ -85,7 +85,6 @@ RelfilenumberMapInvalidateCallback(Datum arg, Oid relid) static void InitializeRelfilenumberMap(void) { - HASHCTL ctl; int i; /* Make sure we've initialized CacheMemoryContext. */ @@ -113,13 +112,9 @@ InitializeRelfilenumberMap(void) * initialized when fmgr_info_cxt() above ERRORs out with an out of memory * error. 
*/ - ctl.keysize = sizeof(RelfilenumberMapKey); - ctl.entrysize = sizeof(RelfilenumberMapEntry); - ctl.hcxt = CacheMemoryContext; - RelfilenumberMapHash = - hash_create("RelfilenumberMap cache", 64, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + RelfilenumberMapHash = hash_make_cxt(RelfilenumberMapEntry, key, + "RelfilenumberMap cache", 64, CacheMemoryContext); /* Watch for invalidation events. */ CacheRegisterRelcacheCallback(RelfilenumberMapInvalidateCallback, diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c index 234585992983..bcfd6a9f0d27 100644 --- a/src/backend/utils/cache/spccache.c +++ b/src/backend/utils/cache/spccache.c @@ -27,6 +27,7 @@ #include "utils/catcache.h" #include "utils/hsearch.h" #include "utils/inval.h" +#include "utils/memutils.h" #include "utils/spccache.h" #include "utils/syscache.h" #include "varatt.h" @@ -77,14 +78,10 @@ InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue) static void InitializeTableSpaceCache(void) { - HASHCTL ctl; - /* Initialize the hash table. */ - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(TableSpaceCacheEntry); TableSpaceCacheHash = - hash_create("TableSpace cache", 16, &ctl, - HASH_ELEM | HASH_BLOBS); + hash_make_cxt(TableSpaceCacheEntry, oid, + "TableSpace cache", 16, TopMemoryContext); /* Make sure we've initialized CacheMemoryContext. 
*/ if (!CacheMemoryContext) diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c index e8ae53238d07..cfa6d8b09a44 100644 --- a/src/backend/utils/cache/ts_cache.c +++ b/src/backend/utils/cache/ts_cache.c @@ -117,12 +117,9 @@ lookup_ts_parser_cache(Oid prsId) if (TSParserCacheHash == NULL) { /* First time through: initialize the hash table */ - HASHCTL ctl; - - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(TSParserCacheEntry); - TSParserCacheHash = hash_create("Tsearch parser cache", 4, - &ctl, HASH_ELEM | HASH_BLOBS); + TSParserCacheHash = hash_make_cxt(TSParserCacheEntry, prsId, + "Tsearch parser cache", 4, + TopMemoryContext); /* Flush cache on pg_ts_parser changes */ CacheRegisterSyscacheCallback(TSPARSEROID, InvalidateTSCacheCallBack, PointerGetDatum(TSParserCacheHash)); @@ -212,12 +209,9 @@ lookup_ts_dictionary_cache(Oid dictId) if (TSDictionaryCacheHash == NULL) { /* First time through: initialize the hash table */ - HASHCTL ctl; - - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(TSDictionaryCacheEntry); - TSDictionaryCacheHash = hash_create("Tsearch dictionary cache", 8, - &ctl, HASH_ELEM | HASH_BLOBS); + TSDictionaryCacheHash = hash_make_cxt(TSDictionaryCacheEntry, dictId, + "Tsearch dictionary cache", 8, + TopMemoryContext); /* Flush cache on pg_ts_dict and pg_ts_template changes */ CacheRegisterSyscacheCallback(TSDICTOID, InvalidateTSCacheCallBack, PointerGetDatum(TSDictionaryCacheHash)); @@ -363,12 +357,9 @@ lookup_ts_dictionary_cache(Oid dictId) static void init_ts_config_cache(void) { - HASHCTL ctl; - - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(TSConfigCacheEntry); - TSConfigCacheHash = hash_create("Tsearch configuration cache", 16, - &ctl, HASH_ELEM | HASH_BLOBS); + TSConfigCacheHash = hash_make_cxt(TSConfigCacheEntry, cfgId, + "Tsearch configuration cache", 16, + TopMemoryContext); /* Flush cache on pg_ts_config and pg_ts_config_map changes */ CacheRegisterSyscacheCallback(TSCONFIGOID, 
InvalidateTSCacheCallBack, PointerGetDatum(TSConfigCacheHash)); diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index 6a347698edff..7e4ffbe67084 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -392,28 +392,23 @@ lookup_type_cache(Oid type_id, int flags) if (TypeCacheHash == NULL) { /* First time through: initialize the hash table */ - HASHCTL ctl; int allocsize; - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(TypeCacheEntry); - /* * TypeCacheEntry takes hash value from the system cache. For * TypeCacheHash we use the same hash in order to speedup search by * hash value. This is used by hash_seq_init_with_hash_value(). */ - ctl.hash = type_cache_syshash; - - TypeCacheHash = hash_create("Type information cache", 64, - &ctl, HASH_ELEM | HASH_FUNCTION); + TypeCacheHash = hash_make_fn_cxt(TypeCacheEntry, type_id, + "Type information cache", 64, + type_cache_syshash, NULL, + TopMemoryContext); Assert(RelIdToTypeIdCacheHash == NULL); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry); - RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64, - &ctl, HASH_ELEM | HASH_BLOBS); + RelIdToTypeIdCacheHash = hash_make_cxt(RelIdToTypeIdCacheEntry, relid, + "Map from relid to OID of cached composite type", + 64, TopMemoryContext); /* Also set up callbacks for SI invalidations */ CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0); @@ -2050,15 +2045,11 @@ assign_record_type_typmod(TupleDesc tupDesc) if (RecordCacheHash == NULL) { /* First time through: initialize the hash table */ - HASHCTL ctl; - - ctl.keysize = sizeof(TupleDesc); /* just the pointer */ - ctl.entrysize = sizeof(RecordCacheEntry); - ctl.hash = record_type_typmod_hash; - ctl.match = record_type_typmod_compare; - RecordCacheHash = hash_create("Record information cache", 64, - &ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE); + RecordCacheHash = 
hash_make_fn_cxt(RecordCacheEntry, tupdesc, + "Record information cache", 64, + record_type_typmod_hash, + record_type_typmod_compare, + TopMemoryContext); /* Also make sure CacheMemoryContext exists */ if (!CacheMemoryContext) diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c index 1366521f471e..f587bbb4cf92 100644 --- a/src/backend/utils/fmgr/dfmgr.c +++ b/src/backend/utils/fmgr/dfmgr.c @@ -26,6 +26,7 @@ #include "storage/fd.h" #include "storage/shmem.h" #include "utils/hsearch.h" +#include "utils/memutils.h" /* signature for PostgreSQL-specific library init function */ @@ -671,14 +672,9 @@ find_rendezvous_variable(const char *varName) /* Create a hashtable if we haven't already done so in this process */ if (rendezvousHash == NULL) { - HASHCTL ctl; - - ctl.keysize = NAMEDATALEN; - ctl.entrysize = sizeof(rendezvousHashEntry); - rendezvousHash = hash_create("Rendezvous variable hash", - 16, - &ctl, - HASH_ELEM | HASH_STRINGS); + rendezvousHash = hash_make_cxt(rendezvousHashEntry, varName, + "Rendezvous variable hash", 16, + TopMemoryContext); } /* Find or create the hashtable entry for this varName */ diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index 0fe63c6bb830..fc85a05367c9 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -32,6 +32,7 @@ #include "utils/fmgrtab.h" #include "utils/guc.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/syscache.h" /* @@ -547,14 +548,8 @@ record_C_func(HeapTuple procedureTuple, /* Create the hash table if it doesn't exist yet */ if (CFuncHash == NULL) { - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(CFuncHashTabEntry); - CFuncHash = hash_create("CFuncHash", - 100, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); + CFuncHash = hash_make_cxt(CFuncHashTabEntry, fn_oid, + "CFuncHash", 100, TopMemoryContext); } entry = (CFuncHashTabEntry *) diff --git a/src/backend/utils/misc/guc.c 
b/src/backend/utils/misc/guc.c index c6484aea087c..f3897c953d3b 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -871,7 +871,6 @@ build_guc_variables(void) { int size_vars; int num_vars = 0; - HASHCTL hash_ctl; GUCHashEntry *hentry; bool found; @@ -894,15 +893,10 @@ build_guc_variables(void) */ size_vars = num_vars + num_vars / 4; - hash_ctl.keysize = sizeof(char *); - hash_ctl.entrysize = sizeof(GUCHashEntry); - hash_ctl.hash = guc_name_hash; - hash_ctl.match = guc_name_match; - hash_ctl.hcxt = GUCMemoryContext; - guc_hashtab = hash_create("GUC hash table", - size_vars, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + guc_hashtab = hash_make_fn_cxt(GUCHashEntry, gucname, + "GUC hash table", size_vars, + guc_name_hash, guc_name_match, + GUCMemoryContext); for (int i = 0; ConfigureNames[i].name; i++) { diff --git a/src/backend/utils/misc/injection_point.c b/src/backend/utils/misc/injection_point.c index d02618c7ffeb..5075399f017b 100644 --- a/src/backend/utils/misc/injection_point.c +++ b/src/backend/utils/misc/injection_point.c @@ -127,16 +127,10 @@ injection_point_cache_add(const char *name, /* If first time, initialize */ if (InjectionPointCache == NULL) { - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(char[INJ_NAME_MAXLEN]); - hash_ctl.entrysize = sizeof(InjectionPointCacheEntry); - hash_ctl.hcxt = TopMemoryContext; - - InjectionPointCache = hash_create("InjectionPoint cache hash", - MAX_INJECTION_POINTS, - &hash_ctl, - HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); + InjectionPointCache = hash_make_cxt(InjectionPointCacheEntry, name, + "InjectionPoint cache hash", + MAX_INJECTION_POINTS, + TopMemoryContext); } entry = (InjectionPointCacheEntry *) diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 943da087c9f5..8241d32f0c62 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -103,23 +103,19 @@ static MemoryContext 
TopPortalContext = NULL; void EnablePortalManager(void) { - HASHCTL ctl; - Assert(TopPortalContext == NULL); TopPortalContext = AllocSetContextCreate(TopMemoryContext, "TopPortalContext", ALLOCSET_DEFAULT_SIZES); - ctl.keysize = MAX_PORTALNAME_LEN; - ctl.entrysize = sizeof(PortalHashEnt); - /* * use PORTALS_PER_USER as a guess of how many hash table entries to * create, initially */ - PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER, - &ctl, HASH_ELEM | HASH_STRINGS); + PortalHashTable = hash_make_cxt(PortalHashEnt, portalname, + "Portal hash", PORTALS_PER_USER, + TopMemoryContext); } /* diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c index 1e8155715709..92662a63177b 100644 --- a/src/backend/utils/time/combocid.c +++ b/src/backend/utils/time/combocid.c @@ -214,8 +214,6 @@ GetComboCommandId(CommandId cmin, CommandId cmax) */ if (comboHash == NULL) { - HASHCTL hash_ctl; - /* Make array first; existence of hash table asserts array exists */ comboCids = (ComboCidKeyData *) MemoryContextAlloc(TopTransactionContext, @@ -223,14 +221,9 @@ GetComboCommandId(CommandId cmin, CommandId cmax) sizeComboCids = CCID_ARRAY_SIZE; usedComboCids = 0; - hash_ctl.keysize = sizeof(ComboCidKeyData); - hash_ctl.entrysize = sizeof(ComboCidEntryData); - hash_ctl.hcxt = TopTransactionContext; - - comboHash = hash_create("Combo CIDs", - CCID_HASH_SIZE, - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + comboHash = hash_make_cxt(ComboCidEntryData, key, + "Combo CIDs", CCID_HASH_SIZE, + TopTransactionContext); } /* diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 73ba1748fe0a..e211f821edc8 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -390,7 +390,6 @@ _PG_init(void) * "plperl.use_strict" */ static bool inited = false; - HASHCTL hash_ctl; if (inited) return; @@ -460,19 +459,13 @@ _PG_init(void) /* * Create hash tables. 
*/ - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(plperl_interp_desc); - plperl_interp_hash = hash_create("PL/Perl interpreters", - 8, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); - - hash_ctl.keysize = sizeof(plperl_proc_key); - hash_ctl.entrysize = sizeof(plperl_proc_ptr); - plperl_proc_hash = hash_create("PL/Perl procedures", - 32, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); + plperl_interp_hash = hash_make_cxt(plperl_interp_desc, user_id, + "PL/Perl interpreters", 8, + TopMemoryContext); + + plperl_proc_hash = hash_make_cxt(plperl_proc_ptr, proc_key, + "PL/Perl procedures", 32, + TopMemoryContext); /* * Save the default opmask. @@ -578,14 +571,9 @@ select_perl_context(bool trusted) /* Make sure we have a query_hash for this interpreter */ if (interp_desc->query_hash == NULL) { - HASHCTL hash_ctl; - - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = sizeof(plperl_query_entry); - interp_desc->query_hash = hash_create("PL/Perl queries", - 32, - &hash_ctl, - HASH_ELEM | HASH_STRINGS); + interp_desc->query_hash = hash_make_cxt(plperl_query_entry, query_name, + "PL/Perl queries", 32, + TopMemoryContext); } /* diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index d19425b7a71a..f3b469432aa3 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -3996,8 +3996,6 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, EState *simple_eval_estate, ResourceOwner simple_eval_resowner) { - HASHCTL ctl; - /* this link will be restored at exit from plpgsql_call_handler */ func->cur_estate = estate; @@ -4052,12 +4050,10 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, /* Create the session-wide cast-expression hash if we didn't already */ if (cast_expr_hash == NULL) { - ctl.keysize = sizeof(plpgsql_CastHashKey); - ctl.entrysize = sizeof(plpgsql_CastExprHashEntry); - cast_expr_hash = hash_create("PLpgSQL cast expressions", - 16, /* start small and extend */ - &ctl, - HASH_ELEM | HASH_BLOBS); + cast_expr_hash = 
hash_make_cxt(plpgsql_CastExprHashEntry, key, + "PLpgSQL cast expressions", + 16, /* start small and extend */ + TopMemoryContext); } /* set up for use of appropriate simple-expression EState and cast hash */ @@ -4065,13 +4061,9 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, { estate->simple_eval_estate = simple_eval_estate; /* Private cast hash just lives in function's main context */ - ctl.keysize = sizeof(plpgsql_CastHashKey); - ctl.entrysize = sizeof(plpgsql_CastHashEntry); - ctl.hcxt = CurrentMemoryContext; - estate->cast_hash = hash_create("PLpgSQL private cast cache", - 16, /* start small and extend */ - &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + estate->cast_hash = hash_make(plpgsql_CastHashEntry, key, + "PLpgSQL private cast cache", + 16); /* start small and extend */ } else { @@ -4079,12 +4071,10 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, /* Create the session-wide cast-info hash table if we didn't already */ if (shared_cast_hash == NULL) { - ctl.keysize = sizeof(plpgsql_CastHashKey); - ctl.entrysize = sizeof(plpgsql_CastHashEntry); - shared_cast_hash = hash_create("PLpgSQL cast cache", - 16, /* start small and extend */ - &ctl, - HASH_ELEM | HASH_BLOBS); + shared_cast_hash = hash_make_cxt(plpgsql_CastHashEntry, key, + "PLpgSQL cast cache", + 16, /* start small and extend */ + TopMemoryContext); } estate->cast_hash = shared_cast_hash; } diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c index 89931612c5b1..b268b380cd50 100644 --- a/src/pl/plpython/plpy_plpymodule.c +++ b/src/pl/plpython/plpy_plpymodule.c @@ -16,6 +16,7 @@ #include "plpy_subxactobject.h" #include "plpy_util.h" #include "utils/builtins.h" +#include "utils/memutils.h" HTAB *PLy_spi_exceptions = NULL; @@ -172,7 +173,6 @@ static void PLy_add_exceptions(PyObject *plpy) { PyObject *excmod; - HASHCTL hash_ctl; excmod = PyModule_Create(&PLy_exc_module); if (excmod == NULL) @@ -193,10 +193,9 @@ PLy_add_exceptions(PyObject *plpy) PLy_exc_spi_error = 
PLy_create_exception("plpy.SPIError", NULL, NULL, "SPIError", plpy); - hash_ctl.keysize = sizeof(int); - hash_ctl.entrysize = sizeof(PLyExceptionEntry); - PLy_spi_exceptions = hash_create("PL/Python SPI exceptions", 256, - &hash_ctl, HASH_ELEM | HASH_BLOBS); + PLy_spi_exceptions = hash_make_cxt(PLyExceptionEntry, sqlstate, + "PL/Python SPI exceptions", 256, + TopMemoryContext); PLy_generate_spi_exceptions(excmod, PLy_exc_spi_error); } diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c index 655ab1d09eea..6317606e01e9 100644 --- a/src/pl/plpython/plpy_procedure.c +++ b/src/pl/plpython/plpy_procedure.c @@ -29,12 +29,9 @@ static char *PLy_procedure_munge_source(const char *name, const char *src); void init_procedure_caches(void) { - HASHCTL hash_ctl; - - hash_ctl.keysize = sizeof(PLyProcedureKey); - hash_ctl.entrysize = sizeof(PLyProcedureEntry); - PLy_procedure_cache = hash_create("PL/Python procedures", 32, &hash_ctl, - HASH_ELEM | HASH_BLOBS); + PLy_procedure_cache = hash_make_cxt(PLyProcedureEntry, key, + "PL/Python procedures", 32, + TopMemoryContext); } /* diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index 73d660e88a69..a3ef429d926b 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -408,7 +408,6 @@ void _PG_init(void) { Tcl_NotifierProcs notifier; - HASHCTL hash_ctl; /* Be sure we do initialization only once (should be redundant now) */ if (pltcl_pm_init_done) @@ -446,22 +445,16 @@ _PG_init(void) /************************************************************ * Create the hash table for working interpreters ************************************************************/ - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(pltcl_interp_desc); - pltcl_interp_htab = hash_create("PL/Tcl interpreters", - 8, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); + pltcl_interp_htab = hash_make_cxt(pltcl_interp_desc, user_id, + "PL/Tcl interpreters", 8, + TopMemoryContext); 
/************************************************************ * Create the hash table for function lookup ************************************************************/ - hash_ctl.keysize = sizeof(pltcl_proc_key); - hash_ctl.entrysize = sizeof(pltcl_proc_ptr); - pltcl_proc_htab = hash_create("PL/Tcl functions", - 100, - &hash_ctl, - HASH_ELEM | HASH_BLOBS); + pltcl_proc_htab = hash_make_cxt(pltcl_proc_ptr, proc_key, + "PL/Tcl functions", 100, + TopMemoryContext); /************************************************************ * Define PL/Tcl's custom GUCs diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c index 504c0235ffbc..ce0edf98266e 100644 --- a/src/timezone/pgtz.c +++ b/src/timezone/pgtz.c @@ -22,6 +22,7 @@ #include "pgtz.h" #include "storage/fd.h" #include "utils/hsearch.h" +#include "utils/memutils.h" /* Current session timezone (controlled by TimeZone GUC) */ @@ -201,15 +202,8 @@ static HTAB *timezone_cache = NULL; static bool init_timezone_hashtable(void) { - HASHCTL hash_ctl; - - hash_ctl.keysize = TZ_STRLEN_MAX + 1; - hash_ctl.entrysize = sizeof(pg_tz_cache); - - timezone_cache = hash_create("Timezones", - 4, - &hash_ctl, - HASH_ELEM | HASH_STRINGS); + timezone_cache = hash_make_cxt(pg_tz_cache, tznameupper, + "Timezones", 4, TopMemoryContext); if (!timezone_cache) return false; From ba2680d11efd2a83bf2debe2b171219bb1f322f1 Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Wed, 3 Dec 2025 23:33:18 +0100 Subject: [PATCH 3/5] Inline functions that have now become trivial --- src/backend/commands/prepare.c | 17 +++-------------- src/backend/commands/sequence.c | 16 +++------------- src/backend/utils/cache/funccache.c | 22 ++++------------------ 3 files changed, 10 insertions(+), 45 deletions(-) diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 0002c6dc993e..cc950ce2887c 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -46,7 +46,6 @@ */ static HTAB *prepared_queries = NULL; -static 
void InitQueryHashTable(void); static ParamListInfo EvaluateParams(ParseState *pstate, PreparedStatement *pstmt, List *params, EState *estate); @@ -364,18 +363,6 @@ EvaluateParams(ParseState *pstate, PreparedStatement *pstmt, List *params, return paramLI; } - -/* - * Initialize query hash table upon first use. - */ -static void -InitQueryHashTable(void) -{ - prepared_queries = hash_make_cxt(PreparedStatement, stmt_name, - "Prepared Queries", 32, - TopMemoryContext); -} - /* * Store all the data pertaining to a query in the hash table using * the specified key. The passed CachedPlanSource should be "unsaved" @@ -393,7 +380,9 @@ StorePreparedStatement(const char *stmt_name, /* Initialize the hash table, if necessary */ if (!prepared_queries) - InitQueryHashTable(); + prepared_queries = hash_make_cxt(PreparedStatement, stmt_name, + "Prepared Queries", 32, + TopMemoryContext); /* Add entry to hash table */ entry = (PreparedStatement *) hash_search(prepared_queries, diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 5fbb3ce90300..aa037901613f 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -89,7 +89,6 @@ static SeqTableData *last_used_seq = NULL; static void fill_seq_with_data(Relation rel, HeapTuple tuple); static void fill_seq_fork_with_data(Relation rel, HeapTuple tuple, ForkNumber forkNum); static Relation lock_and_open_sequence(SeqTable seq); -static void create_seq_hashtable(void); static void init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel); static Form_pg_sequence_data read_seq_tuple(Relation rel, Buffer *buf, HeapTuple seqdatatuple); @@ -1107,17 +1106,6 @@ lock_and_open_sequence(SeqTable seq) return sequence_open(seq->relid, NoLock); } -/* - * Creates the hash table for storing sequence data - */ -static void -create_seq_hashtable(void) -{ - seqhashtab = hash_make_cxt(SeqTableData, relid, - "Sequence values", 16, - TopMemoryContext); -} - /* * Given a relation OID, open and lock 
the sequence. p_elm and p_rel are * output parameters. @@ -1131,7 +1119,9 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel) /* Find or create a hash table entry for this sequence */ if (seqhashtab == NULL) - create_seq_hashtable(); + seqhashtab = hash_make_cxt(SeqTableData, relid, + "Sequence values", 16, + TopMemoryContext); elm = (SeqTable) hash_search(seqhashtab, &relid, HASH_ENTER, &found); diff --git a/src/backend/utils/cache/funccache.c b/src/backend/utils/cache/funccache.c index 34438a9f93b5..613fcfdc9f55 100644 --- a/src/backend/utils/cache/funccache.c +++ b/src/backend/utils/cache/funccache.c @@ -50,23 +50,6 @@ static uint32 cfunc_hash(const void *key, Size keysize); static int cfunc_match(const void *key1, const void *key2, Size keysize); -/* - * Initialize the hash table on first use. - * - * The hash table will be in TopMemoryContext regardless of caller's context. - */ -static void -cfunc_hashtable_init(void) -{ - /* don't allow double-initialization */ - Assert(cfunc_hashtable == NULL); - - cfunc_hashtable = hash_make_fn_cxt(CachedFunctionHashEntry, key, - "Cached function hash", FUNCS_PER_USER, - cfunc_hash, cfunc_match, - TopMemoryContext); -} - /* * cfunc_hash: hash function for cfunc hash table * @@ -165,7 +148,10 @@ cfunc_hashtable_insert(CachedFunction *function, bool found; if (cfunc_hashtable == NULL) - cfunc_hashtable_init(); + cfunc_hashtable = hash_make_fn_cxt(CachedFunctionHashEntry, key, + "Cached function hash", FUNCS_PER_USER, + cfunc_hash, cfunc_match, + TopMemoryContext); hentry = (CachedFunctionHashEntry *) hash_search(cfunc_hashtable, func_key, From 46bd2662ebb172cbeff04f6389709583bedc2cab Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Thu, 4 Dec 2025 15:39:00 +0100 Subject: [PATCH 4/5] Add foreach_hash macro For lists we've had a new foreach style macros since 14dd0f27d7. This adds a similar macro for hash tables. 
This new foreach_hash macro makes iterating over the items in an HTAB as simple as iterating over the items in a List. The only additional thing to keep in mind is that when exiting the loop early you need to call foreach_hash_term. --- src/backend/utils/hash/dynahash.c | 15 +++++++++- src/include/utils/hsearch.h | 46 +++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index bc47469ab3c0..50be734624af 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -1462,7 +1462,7 @@ hash_get_num_entries(HTAB *hashp) } /* - * hash_seq_init/_search/_term + * hash_seq_init/_new/_search/_term * Sequentially search through hash table and return * all the elements one by one, return NULL when no more. * @@ -1496,6 +1496,19 @@ hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp) register_seq_scan(hashp); } +/* + * Same as hash_seq_init(), but returns the status struct instead of taking a + * pointer. + */ +HASH_SEQ_STATUS +hash_seq_new(HTAB *hashp) +{ + HASH_SEQ_STATUS status; + + hash_seq_init(&status, hashp); + return status; +} + /* * Same as above but scan by the given hash value. * See also hash_seq_search(). diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h index 98c88726345c..957680d7a9b9 100644 --- a/src/include/utils/hsearch.h +++ b/src/include/utils/hsearch.h @@ -273,6 +273,51 @@ typedef struct uint32 hashvalue; /* hashvalue to start seqscan over hash */ } HASH_SEQ_STATUS; +/* + * foreach_hash - iterate over all entries in a hash table + * + * This macro simplifies hash table iteration by combining hash_seq_init + * and hash_seq_search into a single for-loop construct. 
+ * + * Usage: + * foreach_hash(MyEntry, entry, my_hashtable) + * { + * // use entry + * } + * + * This replaces the more verbose pattern: + * HASH_SEQ_STATUS status; + * MyEntry *entry; + * hash_seq_init(&status, my_hashtable); + * while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL) + * { + * // use entry + * } + * + * For early termination, use foreach_hash_term() before break: + * foreach_hash(MyEntry, entry, my_hashtable) + * { + * if (found_it) + * { + * foreach_hash_term(entry); + * break; + * } + * } + */ +#define foreach_hash(type, var, htab) \ + for (type *var = 0, *var##__outerloop = (type *) 1; \ + var##__outerloop; \ + var##__outerloop = 0) \ + for (HASH_SEQ_STATUS var##__status = hash_seq_new(htab); \ + (var = (type *) hash_seq_search(&var##__status)) != NULL; ) + +/* + * foreach_hash_term - terminate a foreach_hash loop early + * + * Call this before 'break' to properly clean up the hash scan. + */ +#define foreach_hash_term(var) hash_seq_term(&var##__status) + /* * prototypes for functions in dynahash.c */ @@ -293,6 +338,7 @@ extern bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr); extern int64 hash_get_num_entries(HTAB *hashp); extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp); +extern HASH_SEQ_STATUS hash_seq_new(HTAB *hashp); extern void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue); From 4ca72766d36477cc8ab8d3ed8267e3ca52f92f52 Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Thu, 4 Dec 2025 15:39:09 +0100 Subject: [PATCH 5/5] Use foreach_hash macro throughout the codebase This starts using the new foreach_hash macro throughout the codebase. This makes code easier to read, but obviously does introduce backpatching problems. We can choose not to do this refactor to avoid that. Or we could instead choose to do the refactor and then backpatch these new macros so they can be used in backpatched code. 
At the very least we should choose a few places where we use the new macros to make sure they have coverage. --- contrib/dblink/dblink.c | 5 +- .../pg_stat_statements/pg_stat_statements.c | 39 ++++------- contrib/pg_trgm/trgm_regexp.c | 18 ++--- contrib/postgres_fdw/connection.c | 26 ++----- contrib/postgres_fdw/shippable.c | 6 +- src/backend/access/heap/rewriteheap.c | 18 +---- src/backend/access/transam/xlogutils.c | 20 +----- src/backend/catalog/pg_enum.c | 16 ++--- src/backend/catalog/storage.c | 18 ++--- src/backend/commands/prepare.c | 12 +--- src/backend/commands/tablecmds.c | 7 +- src/backend/optimizer/util/predtest.c | 7 +- src/backend/parser/parse_oper.c | 7 +- src/backend/partitioning/partdesc.c | 6 +- src/backend/postmaster/autovacuum.c | 7 +- src/backend/replication/logical/relation.c | 37 ++-------- .../replication/logical/reorderbuffer.c | 12 +--- src/backend/replication/pgoutput/pgoutput.c | 24 ++----- src/backend/storage/buffer/bufmgr.c | 10 +-- src/backend/storage/ipc/shmem.c | 12 +--- src/backend/storage/ipc/standby.c | 12 +--- src/backend/storage/lmgr/lock.c | 67 ++++--------------- src/backend/storage/lmgr/lwlock.c | 7 +- src/backend/storage/lmgr/predicate.c | 18 +---- src/backend/storage/smgr/smgr.c | 7 +- src/backend/storage/sync/sync.c | 13 +--- src/backend/tsearch/ts_typanalyze.c | 11 +-- src/backend/utils/activity/wait_event.c | 6 +- src/backend/utils/adt/array_typanalyze.c | 17 ++--- src/backend/utils/cache/relcache.c | 42 ++++-------- src/backend/utils/cache/relfilenumbermap.c | 6 +- src/backend/utils/cache/spccache.c | 6 +- src/backend/utils/cache/ts_cache.c | 5 +- src/backend/utils/misc/guc.c | 30 ++------- src/backend/utils/mmgr/portalmem.c | 67 +++---------------- 35 files changed, 131 insertions(+), 490 deletions(-) diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 2951c39d69fb..d621465bd3bb 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -1275,14 +1275,11 @@ 
PG_FUNCTION_INFO_V1(dblink_get_connections); Datum dblink_get_connections(PG_FUNCTION_ARGS) { - HASH_SEQ_STATUS status; - remoteConnHashEnt *hentry; ArrayBuildState *astate = NULL; if (remoteConnHash) { - hash_seq_init(&status, remoteConnHash); - while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(remoteConnHashEnt, hentry, remoteConnHash) { /* ignore it if it's not an open connection */ if (hentry->rconn.conn == NULL) diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 2c55b78e9ba0..a2e13f0a5d8b 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -741,9 +741,7 @@ pgss_shmem_shutdown(int code, Datum arg) FILE *file; char *qbuffer = NULL; Size qbuffer_size = 0; - HASH_SEQ_STATUS hash_seq; int32 num_entries; - pgssEntry *entry; /* Don't try to dump during a crash. */ if (code) @@ -777,8 +775,7 @@ pgss_shmem_shutdown(int code, Datum arg) * When serializing to disk, we store query texts immediately after their * entry data. Any orphaned query texts are thereby excluded. 
*/ - hash_seq_init(&hash_seq, pgss_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(pgssEntry, entry, pgss_hash) { int len = entry->query_len; char *qstr = qtext_fetch(entry->query_offset, len, @@ -790,8 +787,8 @@ pgss_shmem_shutdown(int code, Datum arg) if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 || fwrite(qstr, 1, len + 1, file) != len + 1) { - /* note: we assume hash_seq_term won't change errno */ - hash_seq_term(&hash_seq); + /* note: we assume foreach_hash_term won't change errno */ + foreach_hash_term(entry); goto error; } } @@ -1695,8 +1692,6 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, Size qbuffer_size = 0; Size extent = 0; int gc_count = 0; - HASH_SEQ_STATUS hash_seq; - pgssEntry *entry; /* * Superusers or roles with the privileges of pg_read_all_stats members @@ -1825,8 +1820,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, } } - hash_seq_init(&hash_seq, pgss_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(pgssEntry, entry, pgss_hash) { Datum values[PG_STAT_STATEMENTS_COLS]; bool nulls[PG_STAT_STATEMENTS_COLS]; @@ -2170,9 +2164,7 @@ entry_cmp(const void *lhs, const void *rhs) static void entry_dealloc(void) { - HASH_SEQ_STATUS hash_seq; pgssEntry **entries; - pgssEntry *entry; int nvictims; int i; Size tottextlen; @@ -2196,8 +2188,7 @@ entry_dealloc(void) tottextlen = 0; nvalidtexts = 0; - hash_seq_init(&hash_seq, pgss_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(pgssEntry, entry, pgss_hash) { entries[i++] = entry; /* "Sticky" entries get a different usage decay rate. 
*/ @@ -2509,8 +2500,6 @@ gc_qtexts(void) char *qbuffer; Size qbuffer_size; FILE *qfile = NULL; - HASH_SEQ_STATUS hash_seq; - pgssEntry *entry; Size extent; int nentries; @@ -2552,8 +2541,7 @@ gc_qtexts(void) extent = 0; nentries = 0; - hash_seq_init(&hash_seq, pgss_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(pgssEntry, entry, pgss_hash) { int query_len = entry->query_len; char *qry = qtext_fetch(entry->query_offset, @@ -2576,7 +2564,7 @@ gc_qtexts(void) (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); - hash_seq_term(&hash_seq); + foreach_hash_term(entry); goto gc_fail; } @@ -2643,8 +2631,7 @@ gc_qtexts(void) * Since the contents of the external file are now uncertain, mark all * hashtable entries as having invalid texts. */ - hash_seq_init(&hash_seq, pgss_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(pgssEntry, entry, pgss_hash) { entry->query_offset = 0; entry->query_len = -1; @@ -2708,8 +2695,6 @@ if (e) { \ static TimestampTz entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only) { - HASH_SEQ_STATUS hash_seq; - pgssEntry *entry; FILE *qfile; int64 num_entries; int64 num_remove = 0; @@ -2729,6 +2714,8 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only) if (userid != 0 && dbid != 0 && queryid != INT64CONST(0)) { /* If all the parameters are available, use the fast path. */ + pgssEntry *entry; + memset(&key, 0, sizeof(pgssHashKey)); key.userid = userid; key.dbid = dbid; @@ -2752,8 +2739,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only) else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0)) { /* Reset entries corresponding to valid parameters. 
*/ - hash_seq_init(&hash_seq, pgss_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(pgssEntry, entry, pgss_hash) { if ((!userid || entry->key.userid == userid) && (!dbid || entry->key.dbid == dbid) && @@ -2766,8 +2752,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only) else { /* Reset all entries. */ - hash_seq_init(&hash_seq, pgss_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(pgssEntry, entry, pgss_hash) { SINGLE_ENTRY_RESET(entry); } diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c index df7d3b70d04d..53a9f885ec22 100644 --- a/contrib/pg_trgm/trgm_regexp.c +++ b/contrib/pg_trgm/trgm_regexp.c @@ -1449,10 +1449,8 @@ prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2) static bool selectColorTrigrams(TrgmNFA *trgmNFA) { - HASH_SEQ_STATUS scan_status; int arcsCount = trgmNFA->arcsCount, i; - TrgmState *state; ColorTrgmInfo *colorTrgms; int64 totalTrgmCount; float4 totalTrgmPenalty; @@ -1463,8 +1461,7 @@ selectColorTrigrams(TrgmNFA *trgmNFA) trgmNFA->colorTrgms = colorTrgms; i = 0; - hash_seq_init(&scan_status, trgmNFA->states); - while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrgmState, state, trgmNFA->states) { ListCell *cell; @@ -1926,8 +1923,6 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext) int snumber = 2, arcIndex, arcsCount; - HASH_SEQ_STATUS scan_status; - TrgmState *state; TrgmPackArcInfo *arcs; TrgmPackedArc *packedArcs; TrgmPackedGraph *result; @@ -1935,8 +1930,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext) j; /* Enumerate surviving states, giving init and fin reserved numbers */ - hash_seq_init(&scan_status, trgmNFA->states); - while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrgmState, state, trgmNFA->states) { while (state->parent) state = state->parent; @@ -1958,8 +1952,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext) /* Collect array of all arcs */ 
arcs = palloc_array(TrgmPackArcInfo, trgmNFA->arcsCount); arcIndex = 0; - hash_seq_init(&scan_status, trgmNFA->states); - while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrgmState, state, trgmNFA->states) { TrgmState *source = state; ListCell *cell; @@ -2202,16 +2195,13 @@ static void printTrgmNFA(TrgmNFA *trgmNFA) { StringInfoData buf; - HASH_SEQ_STATUS scan_status; - TrgmState *state; TrgmState *initstate = NULL; initStringInfo(&buf); appendStringInfoString(&buf, "\ndigraph transformedNFA {\n"); - hash_seq_init(&scan_status, trgmNFA->states); - while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrgmState, state, trgmNFA->states) { ListCell *cell; diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 042a7fb3ac1a..42863b02909d 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -1044,8 +1044,6 @@ pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn, static void pgfdw_xact_callback(XactEvent event, void *arg) { - HASH_SEQ_STATUS scan; - ConnCacheEntry *entry; List *pending_entries = NIL; List *cancel_requested = NIL; @@ -1057,8 +1055,7 @@ pgfdw_xact_callback(XactEvent event, void *arg) * Scan all connection cache entries to find open remote transactions, and * close them. */ - hash_seq_init(&scan, ConnectionHash); - while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) + foreach_hash(ConnCacheEntry, entry, ConnectionHash) { PGresult *res; @@ -1195,8 +1192,6 @@ static void pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid, void *arg) { - HASH_SEQ_STATUS scan; - ConnCacheEntry *entry; int curlevel; List *pending_entries = NIL; List *cancel_requested = NIL; @@ -1215,8 +1210,7 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, * of the current level, and close them. 
*/ curlevel = GetCurrentTransactionNestLevel(); - hash_seq_init(&scan, ConnectionHash); - while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) + foreach_hash(ConnCacheEntry, entry, ConnectionHash) { char sql[100]; @@ -1307,14 +1301,10 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, static void pgfdw_inval_callback(Datum arg, int cacheid, uint32 hashvalue) { - HASH_SEQ_STATUS scan; - ConnCacheEntry *entry; - Assert(cacheid == FOREIGNSERVEROID || cacheid == USERMAPPINGOID); /* ConnectionHash must exist already, if we're registered */ - hash_seq_init(&scan, ConnectionHash); - while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) + foreach_hash(ConnCacheEntry, entry, ConnectionHash) { /* Ignore invalid entries */ if (entry->conn == NULL) @@ -2165,8 +2155,6 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo, enum pgfdwVersion api_version) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - HASH_SEQ_STATUS scan; - ConnCacheEntry *entry; InitMaterializedSRF(fcinfo, 0); @@ -2189,8 +2177,7 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo, elog(ERROR, "incorrect number of output arguments"); } - hash_seq_init(&scan, ConnectionHash); - while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) + foreach_hash(ConnCacheEntry, entry, ConnectionHash) { ForeignServer *server; Datum values[POSTGRES_FDW_GET_CONNECTIONS_COLS] = {0}; @@ -2392,8 +2379,6 @@ postgres_fdw_disconnect_all(PG_FUNCTION_ARGS) static bool disconnect_cached_connections(Oid serverid) { - HASH_SEQ_STATUS scan; - ConnCacheEntry *entry; bool all = !OidIsValid(serverid); bool result = false; @@ -2404,8 +2389,7 @@ disconnect_cached_connections(Oid serverid) if (!ConnectionHash) return false; - hash_seq_init(&scan, ConnectionHash); - while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) + foreach_hash(ConnCacheEntry, entry, ConnectionHash) { /* Ignore cache entry if no open connection right now. 
*/ if (!entry->conn) diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c index 66d2d0b9ff4f..bc5dc90e5413 100644 --- a/contrib/postgres_fdw/shippable.c +++ b/contrib/postgres_fdw/shippable.c @@ -65,17 +65,13 @@ typedef struct static void InvalidateShippableCacheCallback(Datum arg, int cacheid, uint32 hashvalue) { - HASH_SEQ_STATUS status; - ShippableCacheEntry *entry; - /* * In principle we could flush only cache entries relating to the * pg_foreign_server entry being outdated; but that would be more * complicated, and it's probably not worth the trouble. So for now, just * flush all entries. */ - hash_seq_init(&status, ShippableCacheHash); - while ((entry = (ShippableCacheEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(ShippableCacheEntry, entry, ShippableCacheHash) { if (hash_search(ShippableCacheHash, &entry->key, diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index dc2c46742fdc..32433fce4a42 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -290,16 +290,11 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm void end_heap_rewrite(RewriteState state) { - HASH_SEQ_STATUS seq_status; - UnresolvedTup unresolved; - /* * Write any remaining tuples in the UnresolvedTups table. If we have any * left, they should in fact be dead, but let's err on the safe side. 
*/ - hash_seq_init(&seq_status, state->rs_unresolved_tups); - - while ((unresolved = hash_seq_search(&seq_status)) != NULL) + foreach_hash(UnresolvedTupData, unresolved, state->rs_unresolved_tups) { ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid); raw_heap_insert(state, unresolved->tuple); @@ -794,8 +789,6 @@ logical_begin_heap_rewrite(RewriteState state) static void logical_heap_rewrite_flush_mappings(RewriteState state) { - HASH_SEQ_STATUS seq_status; - RewriteMappingFile *src; dlist_mutable_iter iter; Assert(state->rs_logical_rewrite); @@ -807,8 +800,7 @@ logical_heap_rewrite_flush_mappings(RewriteState state) elog(DEBUG1, "flushing %u logical rewrite mapping entries", state->rs_num_rewrite_mappings); - hash_seq_init(&seq_status, state->rs_logical_mappings); - while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL) + foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings) { char *waldata; char *waldata_start; @@ -892,9 +884,6 @@ logical_heap_rewrite_flush_mappings(RewriteState state) static void logical_end_heap_rewrite(RewriteState state) { - HASH_SEQ_STATUS seq_status; - RewriteMappingFile *src; - /* done, no logical rewrite in progress */ if (!state->rs_logical_rewrite) return; @@ -904,8 +893,7 @@ logical_end_heap_rewrite(RewriteState state) logical_heap_rewrite_flush_mappings(state); /* Iterate over all mappings we have written and fsync the files. 
*/ - hash_seq_init(&seq_status, state->rs_logical_mappings); - while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL) + foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings) { if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0) ereport(data_sync_elevel(ERROR), diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 9e24759f5cc6..ef13f04e08dd 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -160,15 +160,10 @@ static void forget_invalid_pages(RelFileLocator locator, ForkNumber forkno, BlockNumber minblkno) { - HASH_SEQ_STATUS status; - xl_invalid_page *hentry; - if (invalid_page_tab == NULL) return; /* nothing to do */ - hash_seq_init(&status, invalid_page_tab); - - while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL) + foreach_hash(xl_invalid_page, hentry, invalid_page_tab) { if (RelFileLocatorEquals(hentry->key.locator, locator) && hentry->key.forkno == forkno && @@ -190,15 +185,10 @@ forget_invalid_pages(RelFileLocator locator, ForkNumber forkno, static void forget_invalid_pages_db(Oid dbid) { - HASH_SEQ_STATUS status; - xl_invalid_page *hentry; - if (invalid_page_tab == NULL) return; /* nothing to do */ - hash_seq_init(&status, invalid_page_tab); - - while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL) + foreach_hash(xl_invalid_page, hentry, invalid_page_tab) { if (hentry->key.locator.dbOid == dbid) { @@ -228,20 +218,16 @@ XLogHaveInvalidPages(void) void XLogCheckInvalidPages(void) { - HASH_SEQ_STATUS status; - xl_invalid_page *hentry; bool foundone = false; if (invalid_page_tab == NULL) return; /* nothing to do */ - hash_seq_init(&status, invalid_page_tab); - /* * Our strategy is to emit WARNING messages for all remaining entries and * only PANIC after we've dumped all the available info. 
*/ - while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL) + foreach_hash(xl_invalid_page, hentry, invalid_page_tab) { report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno, hentry->key.blkno, hentry->present); diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c index 4787a61c7d38..fca136b68adb 100644 --- a/src/backend/catalog/pg_enum.c +++ b/src/backend/catalog/pg_enum.c @@ -838,12 +838,10 @@ SerializeUncommittedEnums(void *space, Size size) /* Write out all the OIDs from the types hash table, if there is one. */ if (uncommitted_enum_types) { - HASH_SEQ_STATUS status; - Oid *value; - - hash_seq_init(&status, uncommitted_enum_types); - while ((value = (Oid *) hash_seq_search(&status))) + foreach_hash(Oid, value, uncommitted_enum_types) + { *serialized++ = *value; + } } /* Write out the terminator. */ @@ -852,12 +850,8 @@ SerializeUncommittedEnums(void *space, Size size) /* Write out all the OIDs from the values hash table, if there is one. */ if (uncommitted_enum_values) { - HASH_SEQ_STATUS status; - Oid *value; - - hash_seq_init(&status, uncommitted_enum_values); - while ((value = (Oid *) hash_seq_search(&status))) - *serialized++ = *value; + foreach_hash(Oid, value, uncommitted_enum_values) + * serialized++ = *value; } /* Write out the terminator. 
*/ diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 1d13b4a1f390..dc44cae9e3ed 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -594,10 +594,7 @@ void SerializePendingSyncs(Size maxSize, char *startAddress) { HTAB *tmphash; - HASH_SEQ_STATUS scan; - PendingRelSync *sync; PendingRelDelete *delete; - RelFileLocator *src; RelFileLocator *dest = (RelFileLocator *) startAddress; if (!pendingSyncHash) @@ -608,9 +605,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress) hash_get_num_entries(pendingSyncHash)); /* collect all rlocator from pending syncs */ - hash_seq_init(&scan, pendingSyncHash); - while ((sync = (PendingRelSync *) hash_seq_search(&scan))) + foreach_hash(PendingRelSync, sync, pendingSyncHash) + { (void) hash_search(tmphash, &sync->rlocator, HASH_ENTER, NULL); + } /* remove deleted rnodes */ for (delete = pendingDeletes; delete != NULL; delete = delete->next) @@ -618,9 +616,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress) (void) hash_search(tmphash, &delete->rlocator, HASH_REMOVE, NULL); - hash_seq_init(&scan, tmphash); - while ((src = (RelFileLocator *) hash_seq_search(&scan))) + foreach_hash(RelFileLocator, src, tmphash) + { *dest++ = *src; + } hash_destroy(tmphash); @@ -733,8 +732,6 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker) int nrels = 0, maxrels = 0; SMgrRelation *srels = NULL; - HASH_SEQ_STATUS scan; - PendingRelSync *pendingsync; Assert(GetCurrentTransactionNestLevel() == 1); @@ -763,8 +760,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker) (void) hash_search(pendingSyncHash, &pending->rlocator, HASH_REMOVE, NULL); - hash_seq_init(&scan, pendingSyncHash); - while ((pendingsync = (PendingRelSync *) hash_seq_search(&scan))) + foreach_hash(PendingRelSync, pendingsync, pendingSyncHash) { ForkNumber fork; BlockNumber nblocks[MAX_FORKNUM + 1]; diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 
cc950ce2887c..7d4e4bd862c6 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -522,16 +522,12 @@ DropPreparedStatement(const char *stmt_name, bool showError) void DropAllPreparedStatements(void) { - HASH_SEQ_STATUS seq; - PreparedStatement *entry; - /* nothing cached */ if (!prepared_queries) return; /* walk over cache */ - hash_seq_init(&seq, prepared_queries); - while ((entry = hash_seq_search(&seq)) != NULL) + foreach_hash(PreparedStatement, entry, prepared_queries) { /* Release the plancache entry */ DropCachedPlan(entry->plansource); @@ -678,11 +674,7 @@ pg_prepared_statement(PG_FUNCTION_ARGS) /* hash table might be uninitialized */ if (prepared_queries) { - HASH_SEQ_STATUS hash_seq; - PreparedStatement *prep_stmt; - - hash_seq_init(&hash_seq, prepared_queries); - while ((prep_stmt = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(PreparedStatement, prep_stmt, prepared_queries) { TupleDesc result_desc; Datum values[8]; diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 7f0fb263772f..9a55662af551 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -2236,14 +2236,9 @@ ExecuteTruncateGuts(List *explicit_rels, /* Now go through the hash table, and truncate foreign tables */ if (ft_htab) { - ForeignTruncateInfo *ft_info; - HASH_SEQ_STATUS seq; - - hash_seq_init(&seq, ft_htab); - PG_TRY(); { - while ((ft_info = hash_seq_search(&seq)) != NULL) + foreach_hash(ForeignTruncateInfo, ft_info, ft_htab) { FdwRoutine *routine = GetFdwRoutineByServerId(ft_info->serverid); diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c index 43291f128fd0..fa52f7a776dd 100644 --- a/src/backend/optimizer/util/predtest.c +++ b/src/backend/optimizer/util/predtest.c @@ -2342,15 +2342,10 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it) static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue) { - 
HASH_SEQ_STATUS status; - OprProofCacheEntry *hentry; - Assert(OprProofCacheHash != NULL); /* Currently we just reset all entries; hard to be smarter ... */ - hash_seq_init(&status, OprProofCacheHash); - - while ((hentry = (OprProofCacheEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(OprProofCacheEntry, hentry, OprProofCacheHash) { hentry->have_implic = false; hentry->have_refute = false; diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c index f9588865cfce..91382962d94a 100644 --- a/src/backend/parser/parse_oper.c +++ b/src/backend/parser/parse_oper.c @@ -1076,15 +1076,10 @@ make_oper_cache_entry(OprCacheKey *key, Oid opr_oid) static void InvalidateOprCacheCallBack(Datum arg, int cacheid, uint32 hashvalue) { - HASH_SEQ_STATUS status; - OprCacheEntry *hentry; - Assert(OprCacheHash != NULL); /* Currently we just flush all entries; hard to be smarter ... */ - hash_seq_init(&status, OprCacheHash); - - while ((hentry = (OprCacheEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(OprCacheEntry, hentry, OprCacheHash) { if (hash_search(OprCacheHash, &hentry->key, diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c index b3fbd781494b..908b0e39b8c2 100644 --- a/src/backend/partitioning/partdesc.c +++ b/src/backend/partitioning/partdesc.c @@ -478,11 +478,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel) void DestroyPartitionDirectory(PartitionDirectory pdir) { - HASH_SEQ_STATUS status; - PartitionDirectoryEntry *pde; - - hash_seq_init(&status, pdir->pdir_hash); - while ((pde = hash_seq_search(&status)) != NULL) + foreach_hash(PartitionDirectoryEntry, pde, pdir->pdir_hash) RelationDecrementReferenceCount(pde->rel); } diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index f1b6b1a24a9e..b0df7fba6900 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -1021,8 +1021,6 @@ rebuild_database_list(Oid newdb) 
TimestampTz current_time; int millis_increment; avl_dbase *dbary; - avl_dbase *db; - HASH_SEQ_STATUS seq; int i; /* put all the hash elements into an array */ @@ -1033,8 +1031,7 @@ rebuild_database_list(Oid newdb) #endif i = 0; - hash_seq_init(&seq, dbhash); - while ((db = hash_seq_search(&seq)) != NULL) + foreach_hash(avl_dbase, db, dbhash) memcpy(&(dbary[i++]), db, sizeof(avl_dbase)); /* sort the array */ @@ -1059,7 +1056,7 @@ rebuild_database_list(Oid newdb) */ for (i = 0; i < nelems; i++) { - db = &(dbary[i]); + avl_dbase *db = &(dbary[i]); current_time = TimestampTzPlusMilliseconds(current_time, millis_increment); diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index c4af85d74f1a..57f5872984b7 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -63,25 +63,19 @@ static Oid FindLogicalRepLocalIndex(Relation localrel, LogicalRepRelation *remot static void logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid) { - LogicalRepRelMapEntry *entry; - /* Just to be sure. */ if (LogicalRepRelMap == NULL) return; if (reloid != InvalidOid) { - HASH_SEQ_STATUS status; - - hash_seq_init(&status, LogicalRepRelMap); - /* TODO, use inverse lookup hashtable? 
*/ - while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap) { if (entry->localreloid == reloid) { entry->localrelvalid = false; - hash_seq_term(&status); + foreach_hash_term(entry); break; } } @@ -89,11 +83,7 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid) else { /* invalidate all cache entries */ - HASH_SEQ_STATUS status; - - hash_seq_init(&status, LogicalRepRelMap); - - while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap) entry->localrelvalid = false; } } @@ -530,25 +520,19 @@ logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode) static void logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid) { - LogicalRepPartMapEntry *entry; - /* Just to be sure. */ if (LogicalRepPartMap == NULL) return; if (reloid != InvalidOid) { - HASH_SEQ_STATUS status; - - hash_seq_init(&status, LogicalRepPartMap); - /* TODO, use inverse lookup hashtable? 
*/ - while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap) { if (entry->relmapentry.localreloid == reloid) { entry->relmapentry.localrelvalid = false; - hash_seq_term(&status); + foreach_hash_term(entry); break; } } @@ -556,11 +540,7 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid) else { /* invalidate all cache entries */ - HASH_SEQ_STATUS status; - - hash_seq_init(&status, LogicalRepPartMap); - - while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap) entry->relmapentry.localrelvalid = false; } } @@ -578,15 +558,12 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid) void logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel) { - HASH_SEQ_STATUS status; - LogicalRepPartMapEntry *part_entry; LogicalRepRelMapEntry *entry; if (LogicalRepPartMap == NULL) return; - hash_seq_init(&status, LogicalRepPartMap); - while ((part_entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(LogicalRepPartMapEntry, part_entry, LogicalRepPartMap) { entry = &part_entry->relmapentry; diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index e09516d3d8b8..562fd743d941 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -5250,15 +5250,11 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn, static void ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn) { - HASH_SEQ_STATUS hstat; - ReorderBufferToastEnt *ent; - if (txn->toast_hash == NULL) return; /* sequentially walk over the hash and free everything */ - hash_seq_init(&hstat, txn->toast_hash); - while ((ent = (ReorderBufferToastEnt *) hash_seq_search(&hstat)) != NULL) + foreach_hash(ReorderBufferToastEnt, ent, txn->toast_hash) { dlist_mutable_iter it; @@ 
-5321,11 +5317,7 @@ typedef struct RewriteMappingFile static void DisplayMapping(HTAB *tuplecid_data) { - HASH_SEQ_STATUS hstat; - ReorderBufferTupleCidEnt *ent; - - hash_seq_init(&hstat, tuplecid_data); - while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL) + foreach_hash(ReorderBufferTupleCidEnt, ent, tuplecid_data) { elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u", ent->key.rlocator.dbOid, diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 6dea24ff0a65..3750a880aed7 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -2339,13 +2339,9 @@ get_rel_sync_entry(PGOutputData *data, Relation relation) static void cleanup_rel_sync_cache(TransactionId xid, bool is_commit) { - HASH_SEQ_STATUS hash_seq; - RelationSyncEntry *entry; - Assert(RelationSyncCache != NULL); - hash_seq_init(&hash_seq, RelationSyncCache); - while ((entry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(RelationSyncEntry, entry, RelationSyncCache) { /* * We can set the schema_sent flag for an entry that has committed xid @@ -2374,8 +2370,6 @@ cleanup_rel_sync_cache(TransactionId xid, bool is_commit) static void rel_sync_cache_relation_cb(Datum arg, Oid relid) { - RelationSyncEntry *entry; - /* * We can get here if the plugin was used in SQL interface as the * RelationSyncCache is destroyed when the decoding finishes, but there is @@ -2398,18 +2392,16 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid) * Getting invalidations for relations that aren't in the table is * entirely normal. So we don't care if it's found or not. */ - entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid, - HASH_FIND, NULL); + RelationSyncEntry *entry = hash_search(RelationSyncCache, &relid, + HASH_FIND, NULL); + if (entry != NULL) entry->replicate_valid = false; } else { /* Whole cache must be flushed. 
*/ - HASH_SEQ_STATUS status; - - hash_seq_init(&status, RelationSyncCache); - while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(RelationSyncEntry, entry, RelationSyncCache) { entry->replicate_valid = false; } @@ -2424,9 +2416,6 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid) static void rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue) { - HASH_SEQ_STATUS status; - RelationSyncEntry *entry; - /* * We can get here if the plugin was used in SQL interface as the * RelationSyncCache is destroyed when the decoding finishes, but there is @@ -2439,8 +2428,7 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue) * We have no easy way to identify which cache entries this invalidation * event might have affected, so just mark them all invalid. */ - hash_seq_init(&status, RelationSyncCache); - while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(RelationSyncEntry, entry, RelationSyncCache) { entry->replicate_valid = false; } diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 781738956af4..edcf917fb7d5 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -4057,14 +4057,13 @@ CheckForBufferLeaks(void) { #ifdef USE_ASSERT_CHECKING int RefCountErrors = 0; - PrivateRefCountEntry *res; int i; char *s; /* check the array */ for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++) { - res = &PrivateRefCountArray[i]; + PrivateRefCountEntry *res = &PrivateRefCountArray[i]; if (res->buffer != InvalidBuffer) { @@ -4079,12 +4078,9 @@ CheckForBufferLeaks(void) /* if necessary search the hash */ if (PrivateRefCountOverflowed) { - HASH_SEQ_STATUS hstat; - - hash_seq_init(&hstat, PrivateRefCountHash); - while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL) + foreach_hash(PrivateRefCountEntry, ent, PrivateRefCountHash) { - s = DebugPrintBufferRefcount(res->buffer); + s = 
DebugPrintBufferRefcount(ent->buffer); elog(WARNING, "buffer refcount leak: %s", s); pfree(s); RefCountErrors++; diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index 2d4c518dec2c..8164a8c9174e 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -539,8 +539,6 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) { #define PG_GET_SHMEM_SIZES_COLS 4 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - HASH_SEQ_STATUS hstat; - ShmemIndexEnt *ent; Size named_allocated = 0; Datum values[PG_GET_SHMEM_SIZES_COLS]; bool nulls[PG_GET_SHMEM_SIZES_COLS]; @@ -549,11 +547,9 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) LWLockAcquire(ShmemIndexLock, LW_SHARED); - hash_seq_init(&hstat, ShmemIndex); - /* output all allocated entries */ memset(nulls, 0, sizeof(nulls)); - while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL) + foreach_hash(ShmemIndexEnt, ent, ShmemIndex) { values[0] = CStringGetTextDatum(ent->key); values[1] = Int64GetDatum((char *) ent->location - (char *) ShmemSegHdr); @@ -596,8 +592,6 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS) { #define PG_GET_SHMEM_NUMA_SIZES_COLS 3 ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - HASH_SEQ_STATUS hstat; - ShmemIndexEnt *ent; Datum values[PG_GET_SHMEM_NUMA_SIZES_COLS]; bool nulls[PG_GET_SHMEM_NUMA_SIZES_COLS]; Size os_page_size; @@ -647,11 +641,9 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS) LWLockAcquire(ShmemIndexLock, LW_SHARED); - hash_seq_init(&hstat, ShmemIndex); - /* output all allocated entries */ memset(nulls, 0, sizeof(nulls)); - while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL) + foreach_hash(ShmemIndexEnt, ent, ShmemIndex) { int i; char *startptr, diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 7b9f7e218aa1..c63feaa9dd7a 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -1099,13 +1099,9 @@ 
StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids) void StandbyReleaseAllLocks(void) { - HASH_SEQ_STATUS status; - RecoveryLockXidEntry *entry; - elog(DEBUG2, "release all standby locks"); - hash_seq_init(&status, RecoveryLockXidHash); - while ((entry = hash_seq_search(&status))) + foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash) { StandbyReleaseXidEntryLocks(entry); hash_search(RecoveryLockXidHash, entry, HASH_REMOVE, NULL); @@ -1123,11 +1119,7 @@ StandbyReleaseAllLocks(void) void StandbyReleaseOldLocks(TransactionId oldxid) { - HASH_SEQ_STATUS status; - RecoveryLockXidEntry *entry; - - hash_seq_init(&status, RecoveryLockXidHash); - while ((entry = hash_seq_search(&status))) + foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash) { Assert(TransactionIdIsValid(entry->xid)); diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index e5ea54e78247..02852ee1a4c1 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -2292,11 +2292,9 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) { - HASH_SEQ_STATUS status; LockMethod lockMethodTable; int i, numLockModes; - LOCALLOCK *locallock; LOCK *lock; int partition; bool have_fast_path_lwlock = false; @@ -2329,9 +2327,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) * pointers. Fast-path locks are cleaned up during the locallock table * scan, though. 
*/ - hash_seq_init(&status, LockMethodLocalHash); - - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash) { /* * If the LOCALLOCK entry is unused, something must've gone wrong @@ -2566,15 +2562,10 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) void LockReleaseSession(LOCKMETHODID lockmethodid) { - HASH_SEQ_STATUS status; - LOCALLOCK *locallock; - if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods)) elog(ERROR, "unrecognized lock method: %d", lockmethodid); - hash_seq_init(&status, LockMethodLocalHash); - - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash) { /* Ignore items that are not of the specified lock method */ if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid) @@ -2598,12 +2589,7 @@ LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks) { if (locallocks == NULL) { - HASH_SEQ_STATUS status; - LOCALLOCK *locallock; - - hash_seq_init(&status, LockMethodLocalHash); - - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash) ReleaseLockIfHeld(locallock, false); } else @@ -2697,12 +2683,7 @@ LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks) if (locallocks == NULL) { - HASH_SEQ_STATUS status; - LOCALLOCK *locallock; - - hash_seq_init(&status, LockMethodLocalHash); - - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash) LockReassignOwner(locallock, parent); } else @@ -3383,17 +3364,13 @@ CheckForSessionAndXactLocks(void) } PerLockTagEntry; HTAB *lockhtab; - HASH_SEQ_STATUS status; - LOCALLOCK *locallock; /* Create a local hash table keyed by LOCKTAG only */ lockhtab = hash_make(PerLockTagEntry, lock, "CheckForSessionAndXactLocks table", 256); /* Scan local lock table to find entries for each LOCKTAG */ - hash_seq_init(&status, 
LockMethodLocalHash); - - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash) { LOCALLOCKOWNER *lockOwners = locallock->lockOwners; PerLockTagEntry *hentry; @@ -3456,16 +3433,11 @@ CheckForSessionAndXactLocks(void) void AtPrepare_Locks(void) { - HASH_SEQ_STATUS status; - LOCALLOCK *locallock; - /* First, verify there aren't locks of both xact and session level */ CheckForSessionAndXactLocks(); /* Now do the per-locallock cleanup work */ - hash_seq_init(&status, LockMethodLocalHash); - - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash) { TwoPhaseLockRecord record; LOCALLOCKOWNER *lockOwners = locallock->lockOwners; @@ -3553,8 +3525,6 @@ void PostPrepare_Locks(FullTransactionId fxid) { PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false); - HASH_SEQ_STATUS status; - LOCALLOCK *locallock; LOCK *lock; PROCLOCK *proclock; PROCLOCKTAG proclocktag; @@ -3576,9 +3546,7 @@ PostPrepare_Locks(FullTransactionId fxid) * pointing to the same proclock, and we daren't end up with any dangling * pointers. 
*/ - hash_seq_init(&status, LockMethodLocalHash); - - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash) { LOCALLOCKOWNER *lockOwners = locallock->lockOwners; bool haveSessionLock; @@ -3774,8 +3742,6 @@ LockData * GetLockStatusData(void) { LockData *data; - PROCLOCK *proclock; - HASH_SEQ_STATUS seqstat; int els; int el; int i; @@ -3911,9 +3877,7 @@ GetLockStatusData(void) } /* Now scan the tables to copy the data */ - hash_seq_init(&seqstat, LockMethodProcLockHash); - - while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat))) + foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash) { PGPROC *proc = proclock->tag.myProc; LOCK *lock = proclock->tag.myLock; @@ -4152,8 +4116,6 @@ xl_standby_lock * GetRunningTransactionLocks(int *nlocks) { xl_standby_lock *accessExclusiveLocks; - PROCLOCK *proclock; - HASH_SEQ_STATUS seqstat; int i; int index; int els; @@ -4175,10 +4137,9 @@ GetRunningTransactionLocks(int *nlocks) */ accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock)); - /* Now scan the tables to copy the data */ - hash_seq_init(&seqstat, LockMethodProcLockHash); - /* + * Now scan the tables to copy the data. + * * If lock is a currently granted AccessExclusiveLock then it will have * just one proclock holder, so locks are never accessed twice in this * particular case. Don't copy this code for use elsewhere because in the @@ -4186,7 +4147,7 @@ GetRunningTransactionLocks(int *nlocks) * non-exclusive lock types. 
*/ index = 0; - while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat))) + foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash) { /* make sure this definition matches the one used in LockAcquire */ if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) && @@ -4281,18 +4242,14 @@ void DumpAllLocks(void) { PGPROC *proc; - PROCLOCK *proclock; LOCK *lock; - HASH_SEQ_STATUS status; proc = MyProc; if (proc && proc->waitLock) LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0); - hash_seq_init(&status, LockMethodProcLockHash); - - while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash) { PROCLOCK_PRINT("DumpAllLocks", proclock); diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 1f749beadcee..b7466d3ca18c 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -316,15 +316,10 @@ init_lwlock_stats(void) static void print_lwlock_stats(int code, Datum arg) { - HASH_SEQ_STATUS scan; - lwlock_stats *lwstats; - - hash_seq_init(&scan, lwlock_stats_htab); - /* Grab an LWLock to keep different backends from mixing reports */ LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE); - while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL) + foreach_hash(lwlock_stats, lwstats, lwlock_stats_htab) { fprintf(stderr, "PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n", diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index c2d850327161..6b602ba89ea0 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -1440,8 +1440,6 @@ GetPredicateLockStatusData(void) int i; int els, el; - HASH_SEQ_STATUS seqstat; - PREDICATELOCK *predlock; data = (PredicateLockData *) palloc(sizeof(PredicateLockData)); @@ -1463,11 +1461,9 @@ GetPredicateLockStatusData(void) /* Scan through PredicateLockHash and copy contents */ - 
hash_seq_init(&seqstat, PredicateLockHash); - el = 0; - while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat))) + foreach_hash(PREDICATELOCK, predlock, PredicateLockHash) { data->locktags[el] = predlock->tag.myTarget->tag; data->xacts[el] = *predlock->tag.myXact; @@ -2924,8 +2920,6 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag, static void DropAllPredicateLocksFromTable(Relation relation, bool transfer) { - HASH_SEQ_STATUS seqstat; - PREDICATELOCKTARGET *oldtarget; PREDICATELOCKTARGET *heaptarget; Oid dbId; Oid relId; @@ -2981,9 +2975,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer) RemoveScratchTarget(true); /* Scan through target map */ - hash_seq_init(&seqstat, PredicateLockTargetHash); - - while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat))) + foreach_hash(PREDICATELOCKTARGET, oldtarget, PredicateLockTargetHash) { dlist_mutable_iter iter; @@ -4406,8 +4398,6 @@ CheckForSerializableConflictIn(Relation relation, const ItemPointerData *tid, Bl void CheckTableForSerializableConflictIn(Relation relation) { - HASH_SEQ_STATUS seqstat; - PREDICATELOCKTARGET *target; Oid dbId; Oid heapId; int i; @@ -4441,9 +4431,7 @@ CheckTableForSerializableConflictIn(Relation relation) LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE); /* Scan through target list */ - hash_seq_init(&seqstat, PredicateLockTargetHash); - - while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat))) + foreach_hash(PREDICATELOCKTARGET, target, PredicateLockTargetHash) { dlist_mutable_iter iter; diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c index d4fa6a144f15..b2ae58662f4b 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c @@ -409,9 +409,6 @@ smgrdestroyall(void) void smgrreleaseall(void) { - HASH_SEQ_STATUS status; - SMgrRelation reln; - /* Nothing to do if hashtable not set up */ if (SMgrRelationHash == NULL) return; @@ -419,9 +416,7 @@ 
smgrreleaseall(void) /* seems unsafe to accept interrupts while iterating */ HOLD_INTERRUPTS(); - hash_seq_init(&status, SMgrRelationHash); - - while ((reln = (SMgrRelation) hash_seq_search(&status)) != NULL) + foreach_hash(SMgrRelationData, reln, SMgrRelationHash) { smgrrelease(reln); } diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c index 42b7fc112881..f65c52eb9a2e 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c @@ -280,8 +280,6 @@ ProcessSyncRequests(void) { static bool sync_in_progress = false; - HASH_SEQ_STATUS hstat; - PendingFsyncEntry *entry; int absorb_counter; /* Statistics on sync times */ @@ -338,8 +336,7 @@ ProcessSyncRequests(void) if (sync_in_progress) { /* prior try failed, so update any stale cycle_ctr values */ - hash_seq_init(&hstat, pendingOps); - while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL) + foreach_hash(PendingFsyncEntry, entry, pendingOps) { entry->cycle_ctr = sync_cycle_ctr; } @@ -353,8 +350,7 @@ ProcessSyncRequests(void) /* Now scan the hashtable for fsync requests to process */ absorb_counter = FSYNCS_PER_ABSORB; - hash_seq_init(&hstat, pendingOps); - while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL) + foreach_hash(PendingFsyncEntry, entry, pendingOps) { int failures; @@ -495,13 +491,10 @@ RememberSyncRequest(const FileTag *ftag, SyncRequestType type) } else if (type == SYNC_FILTER_REQUEST) { - HASH_SEQ_STATUS hstat; - PendingFsyncEntry *pfe; ListCell *cell; /* Cancel matching fsync requests */ - hash_seq_init(&hstat, pendingOps); - while ((pfe = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL) + foreach_hash(PendingFsyncEntry, pfe, pendingOps) { if (pfe->tag.handler == ftag->handler && syncsw[ftag->handler].sync_filetagmatches(ftag, &pfe->tag)) diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c index a9bc6901a129..439f87880036 100644 --- a/src/backend/tsearch/ts_typanalyze.c +++ 
b/src/backend/tsearch/ts_typanalyze.c @@ -149,7 +149,6 @@ compute_tsvector_stats(VacAttrStats *stats, /* This is D from the LC algorithm. */ HTAB *lexemes_tab; - HASH_SEQ_STATUS scan_status; /* This is the current bucket number from the LC algorithm */ int b_current; @@ -288,7 +287,6 @@ compute_tsvector_stats(VacAttrStats *stats, int nonnull_cnt = samplerows - null_cnt; int i; TrackItem **sort_table; - TrackItem *item; int track_len; int cutoff_freq; int minfreq, @@ -315,10 +313,9 @@ compute_tsvector_stats(VacAttrStats *stats, i = hash_get_num_entries(lexemes_tab); /* surely enough space */ sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i); - hash_seq_init(&scan_status, lexemes_tab); track_len = 0; maxfreq = 0; - while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrackItem, item, lexemes_tab) { if (item->frequency > cutoff_freq) { @@ -462,11 +459,7 @@ compute_tsvector_stats(VacAttrStats *stats, static void prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current) { - HASH_SEQ_STATUS scan_status; - TrackItem *item; - - hash_seq_init(&scan_status, lexemes_tab); - while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrackItem, item, lexemes_tab) { if (item->frequency + item->delta <= b_current) { diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c index 35aa0208fc93..d0b90f40d32d 100644 --- a/src/backend/utils/activity/wait_event.c +++ b/src/backend/utils/activity/wait_event.c @@ -299,8 +299,6 @@ char ** GetWaitEventCustomNames(uint32 classId, int *nwaitevents) { char **waiteventnames; - WaitEventCustomEntryByName *hentry; - HASH_SEQ_STATUS hash_seq; int index; int els; @@ -313,10 +311,8 @@ GetWaitEventCustomNames(uint32 classId, int *nwaitevents) waiteventnames = palloc(els * sizeof(char *)); /* Now scan the hash table to copy the data */ - hash_seq_init(&hash_seq, WaitEventCustomHashByName); - index = 0; - while ((hentry = (WaitEventCustomEntryByName 
*) hash_seq_search(&hash_seq)) != NULL) + foreach_hash(WaitEventCustomEntryByName, hentry, WaitEventCustomHashByName) { if ((hentry->wait_event_info & WAIT_EVENT_CLASS_MASK) != classId) continue; diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c index cc310c044b35..a5be7e4c1c38 100644 --- a/src/backend/utils/adt/array_typanalyze.c +++ b/src/backend/utils/adt/array_typanalyze.c @@ -223,7 +223,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, /* This is D from the LC algorithm. */ HTAB *elements_tab; - HASH_SEQ_STATUS scan_status; /* This is the current bucket number from the LC algorithm */ int b_current; @@ -232,10 +231,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, int bucket_width; int array_no; int64 element_no; - TrackItem *item; int slot_idx; HTAB *count_tab; - DECountItem *count_item; extra_data = (ArrayAnalyzeExtraData *) stats->extra_data; @@ -300,6 +297,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, int64 prev_element_no = element_no; int distinct_count; bool count_item_found; + DECountItem *count_item; vacuum_delay_point(true); @@ -338,6 +336,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, { Datum elem_value; bool found; + TrackItem *item; /* No null element processing other than flag setting here */ if (elem_nulls[j]) @@ -458,10 +457,9 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, i = hash_get_num_entries(elements_tab); /* surely enough space */ sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i); - hash_seq_init(&scan_status, elements_tab); track_len = 0; maxfreq = 0; - while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrackItem, item, elements_tab) { if (item->frequency > cutoff_freq) { @@ -595,9 +593,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, */ sorted_count_items = (DECountItem **) 
palloc(sizeof(DECountItem *) * count_items_count); - hash_seq_init(&scan_status, count_tab); j = 0; - while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(DECountItem, count_item, count_tab) { sorted_count_items[j++] = count_item; } @@ -684,11 +681,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, static void prune_element_hashtable(HTAB *elements_tab, int b_current) { - HASH_SEQ_STATUS scan_status; - TrackItem *item; - - hash_seq_init(&scan_status, elements_tab); - while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL) + foreach_hash(TrackItem, item, elements_tab) { if (item->frequency + item->delta <= b_current) { diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index b76d6bbd5c95..eb441a815523 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -2990,8 +2990,6 @@ RelationCacheInvalidateEntry(Oid relationId) void RelationCacheInvalidate(bool debug_discard) { - HASH_SEQ_STATUS status; - RelIdCacheEnt *idhentry; Relation relation; List *rebuildFirstList = NIL; List *rebuildList = NIL; @@ -3004,9 +3002,7 @@ RelationCacheInvalidate(bool debug_discard) RelationMapInvalidateAll(); /* Phase 1 */ - hash_seq_init(&status, RelationIdCache); - - while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache) { relation = idhentry->reldesc; @@ -3151,12 +3147,9 @@ AssertPendingSyncConsistency(Relation relation) void AssertPendingSyncs_RelationCache(void) { - HASH_SEQ_STATUS status; - LOCALLOCK *locallock; Relation *rels; int maxrels; int nrels; - RelIdCacheEnt *idhentry; int i; /* @@ -3170,8 +3163,7 @@ AssertPendingSyncs_RelationCache(void) maxrels = 1; rels = palloc(maxrels * sizeof(*rels)); nrels = 0; - hash_seq_init(&status, GetLockMethodLocalHash()); - while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) + foreach_hash(LOCALLOCK, 
locallock, GetLockMethodLocalHash()) { Oid relid; Relation r; @@ -3193,8 +3185,7 @@ AssertPendingSyncs_RelationCache(void) rels[nrels++] = r; } - hash_seq_init(&status, RelationIdCache); - while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache) AssertPendingSyncConsistency(idhentry->reldesc); for (i = 0; i < nrels; i++) @@ -3222,8 +3213,6 @@ AssertPendingSyncs_RelationCache(void) void AtEOXact_RelationCache(bool isCommit) { - HASH_SEQ_STATUS status; - RelIdCacheEnt *idhentry; int i; /* @@ -3246,8 +3235,7 @@ AtEOXact_RelationCache(bool isCommit) */ if (eoxact_list_overflowed) { - hash_seq_init(&status, RelationIdCache); - while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache) { AtEOXact_cleanup(idhentry->reldesc, isCommit); } @@ -3256,10 +3244,11 @@ AtEOXact_RelationCache(bool isCommit) { for (i = 0; i < eoxact_list_len; i++) { - idhentry = (RelIdCacheEnt *) hash_search(RelationIdCache, - &eoxact_list[i], - HASH_FIND, - NULL); + RelIdCacheEnt *idhentry = hash_search(RelationIdCache, + &eoxact_list[i], + HASH_FIND, + NULL); + if (idhentry != NULL) AtEOXact_cleanup(idhentry->reldesc, isCommit); } @@ -3375,8 +3364,6 @@ void AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid) { - HASH_SEQ_STATUS status; - RelIdCacheEnt *idhentry; int i; /* @@ -3394,8 +3381,7 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, */ if (eoxact_list_overflowed) { - hash_seq_init(&status, RelationIdCache); - while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache) { AtEOSubXact_cleanup(idhentry->reldesc, isCommit, mySubid, parentSubid); @@ -3405,6 +3391,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, { for (i = 0; i < eoxact_list_len; i++) { + RelIdCacheEnt *idhentry; + idhentry = 
(RelIdCacheEnt *) hash_search(RelationIdCache, &eoxact_list[i], HASH_FIND, @@ -6583,8 +6571,6 @@ write_relcache_init_file(bool shared) char tempfilename[MAXPGPATH]; char finalfilename[MAXPGPATH]; int magic; - HASH_SEQ_STATUS status; - RelIdCacheEnt *idhentry; int i; /* @@ -6644,9 +6630,7 @@ write_relcache_init_file(bool shared) /* * Write all the appropriate reldescs (in no particular order). */ - hash_seq_init(&status, RelationIdCache); - - while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache) { Relation rel = idhentry->reldesc; Form_pg_class relform = rel->rd_rel; diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c index 69aba463eaea..2b9ca8b36a14 100644 --- a/src/backend/utils/cache/relfilenumbermap.c +++ b/src/backend/utils/cache/relfilenumbermap.c @@ -51,14 +51,10 @@ typedef struct static void RelfilenumberMapInvalidateCallback(Datum arg, Oid relid) { - HASH_SEQ_STATUS status; - RelfilenumberMapEntry *entry; - /* callback only gets registered after creating the hash */ Assert(RelfilenumberMapHash != NULL); - hash_seq_init(&status, RelfilenumberMapHash); - while ((entry = (RelfilenumberMapEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(RelfilenumberMapEntry, entry, RelfilenumberMapHash) { /* * If relid is InvalidOid, signaling a complete reset, we must remove diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c index bcfd6a9f0d27..bd6a161e7ded 100644 --- a/src/backend/utils/cache/spccache.c +++ b/src/backend/utils/cache/spccache.c @@ -55,11 +55,7 @@ typedef struct static void InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue) { - HASH_SEQ_STATUS status; - TableSpaceCacheEntry *spc; - - hash_seq_init(&status, TableSpaceCacheHash); - while ((spc = (TableSpaceCacheEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(TableSpaceCacheEntry, spc, TableSpaceCacheHash) { if 
(spc->opts) pfree(spc->opts); diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c index cfa6d8b09a44..5446de5eb1ab 100644 --- a/src/backend/utils/cache/ts_cache.c +++ b/src/backend/utils/cache/ts_cache.c @@ -94,11 +94,8 @@ static void InvalidateTSCacheCallBack(Datum arg, int cacheid, uint32 hashvalue) { HTAB *hash = (HTAB *) DatumGetPointer(arg); - HASH_SEQ_STATUS status; - TSAnyCacheEntry *entry; - hash_seq_init(&status, hash); - while ((entry = (TSAnyCacheEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(TSAnyCacheEntry, entry, hash) entry->isvalid = false; /* Also invalidate the current-config cache if it's pg_ts_config */ diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index f3897c953d3b..f9ce7c5ddf36 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -288,8 +288,6 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel) const char *ConfFileWithError; ConfigVariable *head, *tail; - HASH_SEQ_STATUS status; - GUCHashEntry *hentry; /* Parse the main config file into a list of option names and values */ ConfFileWithError = ConfigFileName; @@ -364,8 +362,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel) * need this so that we can tell below which ones have been removed from * the file since we last processed it. */ - hash_seq_init(&status, guc_hashtab); - while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(GUCHashEntry, hentry, guc_hashtab) { struct config_generic *gconf = hentry->gucvar; @@ -449,8 +446,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel) * boot-time defaults. If such a variable can't be changed after startup, * report that and continue. 
*/ - hash_seq_init(&status, guc_hashtab); - while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(GUCHashEntry, hentry, guc_hashtab) { struct config_generic *gconf = hentry->gucvar; @@ -839,8 +835,6 @@ struct config_generic ** get_guc_variables(int *num_vars) { struct config_generic **result; - HASH_SEQ_STATUS status; - GUCHashEntry *hentry; int i; *num_vars = hash_get_num_entries(guc_hashtab); @@ -848,8 +842,7 @@ get_guc_variables(int *num_vars) /* Extract pointers from the hash table */ i = 0; - hash_seq_init(&status, guc_hashtab); - while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(GUCHashEntry, hentry, guc_hashtab) result[i++] = hentry->gucvar; Assert(i == *num_vars); @@ -1400,9 +1393,6 @@ check_GUC_init(const struct config_generic *gconf) void InitializeGUCOptions(void) { - HASH_SEQ_STATUS status; - GUCHashEntry *hentry; - /* * Before log_line_prefix could possibly receive a nonempty setting, make * sure that timezone processing is minimally alive (see elog.c). @@ -1418,8 +1408,7 @@ InitializeGUCOptions(void) * Load all variables with their compiled-in defaults, and initialize * status fields as needed. */ - hash_seq_init(&status, guc_hashtab); - while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(GUCHashEntry, hentry, guc_hashtab) { /* Check mapping between initial and default value */ Assert(check_GUC_init(hentry->gucvar)); @@ -2414,9 +2403,6 @@ AtEOXact_GUC(bool isCommit, int nestLevel) void BeginReportingGUCOptions(void) { - HASH_SEQ_STATUS status; - GUCHashEntry *hentry; - /* * Don't do anything unless talking to an interactive frontend. 
*/ @@ -2438,8 +2424,7 @@ BeginReportingGUCOptions(void) PGC_INTERNAL, PGC_S_OVERRIDE); /* Transmit initial values of interesting variables */ - hash_seq_init(&status, guc_hashtab); - while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(GUCHashEntry, hentry, guc_hashtab) { struct config_generic *conf = hentry->gucvar; @@ -5142,16 +5127,13 @@ void MarkGUCPrefixReserved(const char *className) { int classLen = strlen(className); - HASH_SEQ_STATUS status; - GUCHashEntry *hentry; MemoryContext oldcontext; /* * Check for existing placeholders. We must actually remove invalid * placeholders, else future parallel worker startups will fail. */ - hash_seq_init(&status, guc_hashtab); - while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) + foreach_hash(GUCHashEntry, hentry, guc_hashtab) { struct config_generic *var = hentry->gucvar; diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 8241d32f0c62..4a7b10d219b8 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -776,12 +776,7 @@ PreCommit_Portals(bool isPrepare) void AtAbort_Portals(void) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -853,12 +848,7 @@ AtAbort_Portals(void) void AtCleanup_Portals(void) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -912,12 +902,7 @@ AtCleanup_Portals(void) void PortalErrorCleanup(void) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) 
hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -941,12 +926,7 @@ AtSubCommit_Portals(SubTransactionId mySubid, int parentLevel, ResourceOwner parentXactOwner) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -977,12 +957,7 @@ AtSubAbort_Portals(SubTransactionId mySubid, ResourceOwner myXactOwner, ResourceOwner parentXactOwner) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -1087,12 +1062,7 @@ AtSubAbort_Portals(SubTransactionId mySubid, void AtSubCleanup_Portals(SubTransactionId mySubid) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -1127,8 +1097,6 @@ Datum pg_cursor(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - HASH_SEQ_STATUS hash_seq; - PortalHashEnt *hentry; /* * We put all the tuples into a tuplestore in one scan of the hashtable. 
@@ -1136,8 +1104,7 @@ pg_cursor(PG_FUNCTION_ARGS) */ InitMaterializedSRF(fcinfo, 0); - hash_seq_init(&hash_seq, PortalHashTable); - while ((hentry = hash_seq_search(&hash_seq)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; Datum values[6]; @@ -1166,12 +1133,7 @@ pg_cursor(PG_FUNCTION_ARGS) bool ThereAreNoReadyPortals(void) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -1202,12 +1164,7 @@ ThereAreNoReadyPortals(void) void HoldPinnedPortals(void) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; - - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal; @@ -1251,15 +1208,11 @@ HoldPinnedPortals(void) void ForgetPortalSnapshots(void) { - HASH_SEQ_STATUS status; - PortalHashEnt *hentry; int numPortalSnaps = 0; int numActiveSnaps = 0; /* First, scan PortalHashTable and clear portalSnapshot fields */ - hash_seq_init(&status, PortalHashTable); - - while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL) + foreach_hash(PortalHashEnt, hentry, PortalHashTable) { Portal portal = hentry->portal;