| /*------------------------------------------------------------------------- |
| * |
| * tableam.h |
| * POSTGRES table access method definitions. |
| * |
| * |
| * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group |
| * Portions Copyright (c) 1994, Regents of the University of California |
| * |
| * src/include/access/tableam.h |
| * |
| * NOTES |
| * See tableam.sgml for higher level documentation. |
| * |
| *------------------------------------------------------------------------- |
| */ |
| #ifndef TABLEAM_H |
| #define TABLEAM_H |
| |
| #include "access/relscan.h" |
| #include "access/sdir.h" |
| #include "access/xact.h" |
| #include "executor/tuptable.h" |
| #include "utils/rel.h" |
| #include "utils/snapshot.h" |
| #include "utils/snapmgr.h" |
| |
| |
| #define DEFAULT_TABLE_ACCESS_METHOD "heap" |
| |
| /* GUCs */ |
| extern PGDLLIMPORT char *default_table_access_method; |
| extern PGDLLIMPORT bool synchronize_seqscans; |
| |
| |
| struct BulkInsertStateData; |
| struct IndexInfo; |
| struct SampleScanState; |
| struct TBMIterateResult; |
| struct VacuumParams; |
| struct ValidateIndexState; |
| |
| /** |
| * Flags represented the supported features of scan. |
| * |
| * The first 8 bits are reserved for kernel expansion of some attributes, |
| * and the remaining 24 bits are reserved for custom tableam. |
| * |
| * If you add a new flag, make sure the flag's bit is consecutive with |
| * the previous one. |
| * |
| */ |
#define SCAN_SUPPORT_COLUMN_ORIENTED_SCAN (1 << 0) /* support column-oriented scanning */
| #define SCAN_SUPPORT_RUNTIME_FILTER (1 << 1) /* support runtime filter scan */ |
| #define SCAN_SUPPORT_VECTORIZATION (1 << 2) /* support vectorization scan */ |
| #define SCAN_FORCE_BIG_WRITE_LOCK (1 << 3) /* force big write lock */ |
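
/*
 * A minimal sketch (not part of this header's API) of how a custom table
 * AM's scan_flags callback, declared below in TableAmRoutine, might
 * advertise these features.  The function name is hypothetical; see also
 * table_scan_flags().
 */
static inline uint32
example_am_scan_flags(Relation rel)
{
	/* this hypothetical AM supports column-oriented and vectorized scans */
	return SCAN_SUPPORT_COLUMN_ORIENTED_SCAN | SCAN_SUPPORT_VECTORIZATION;
}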
| |
| /* |
| * Bitmask values for the flags argument to the scan_begin callback. |
| */ |
| typedef enum ScanOptions |
| { |
| /* one of SO_TYPE_* may be specified */ |
| SO_TYPE_SEQSCAN = 1 << 0, |
| SO_TYPE_BITMAPSCAN = 1 << 1, |
| SO_TYPE_SAMPLESCAN = 1 << 2, |
| SO_TYPE_TIDSCAN = 1 << 3, |
| SO_TYPE_TIDRANGESCAN = 1 << 4, |
| SO_TYPE_ANALYZE = 1 << 5, |
| |
| /* several of SO_ALLOW_* may be specified */ |
| /* allow or disallow use of access strategy */ |
| SO_ALLOW_STRAT = 1 << 6, |
| /* report location to syncscan logic? */ |
| SO_ALLOW_SYNC = 1 << 7, |
| /* verify visibility page-at-a-time? */ |
| SO_ALLOW_PAGEMODE = 1 << 8, |
| |
| /* unregister snapshot at scan end? */ |
| SO_TEMP_SNAPSHOT = 1 << 9 |
| } ScanOptions; |
| |
| /* |
| * Result codes for table_{update,delete,lock_tuple}, and for visibility |
| * routines inside table AMs. |
| */ |
| typedef enum TM_Result |
| { |
| /* |
| * Signals that the action succeeded (i.e. update/delete performed, lock |
| * was acquired) |
| */ |
| TM_Ok, |
| |
| /* The affected tuple wasn't visible to the relevant snapshot */ |
| TM_Invisible, |
| |
| /* The affected tuple was already modified by the calling backend */ |
| TM_SelfModified, |
| |
| /* |
| * The affected tuple was updated by another transaction. This includes |
| * the case where tuple was moved to another partition. |
| */ |
| TM_Updated, |
| |
| /* The affected tuple was deleted by another transaction */ |
| TM_Deleted, |
| |
| /* |
| * The affected tuple is currently being modified by another session. This |
| * will only be returned if table_(update/delete/lock_tuple) are |
| * instructed not to wait. |
| */ |
| TM_BeingModified, |
| |
| /* lock couldn't be acquired, action skipped. Only used by lock_tuple */ |
| TM_WouldBlock |
| } TM_Result; |
| |
| /* |
| * Result codes for table_update(..., update_indexes*..). |
| * Used to determine which indexes to update. |
| */ |
| typedef enum TU_UpdateIndexes |
| { |
| /* No indexed columns were updated (incl. TID addressing of tuple) */ |
| TU_None, |
| |
| /* A non-summarizing indexed column was updated, or the TID has changed */ |
| TU_All, |
| |
| /* Only summarized columns were updated, TID is unchanged */ |
| TU_Summarizing |
| } TU_UpdateIndexes; |
| |
| /* |
| * When table_tuple_update, table_tuple_delete, or table_tuple_lock fail |
| * because the target tuple is already outdated, they fill in this struct to |
| * provide information to the caller about what happened. |
| * |
| * ctid is the target's ctid link: it is the same as the target's TID if the |
| * target was deleted, or the location of the replacement tuple if the target |
| * was updated. |
| * |
| * xmax is the outdating transaction's XID. If the caller wants to visit the |
| * replacement tuple, it must check that this matches before believing the |
| * replacement is really a match. This is InvalidTransactionId if the target |
| * was !LP_NORMAL (expected only for a TID retrieved from syscache). |
| * |
| * cmax is the outdating command's CID, but only when the failure code is |
| * TM_SelfModified (i.e., something in the current transaction outdated the |
| * tuple); otherwise cmax is zero. (We make this restriction because |
| * HeapTupleHeaderGetCmax doesn't work for tuples outdated in other |
| * transactions.) |
| */ |
| typedef struct TM_FailureData |
| { |
| ItemPointerData ctid; |
| TransactionId xmax; |
| CommandId cmax; |
| bool traversed; |
| } TM_FailureData; |
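
/*
 * Illustrative sketch only (a hypothetical helper, not part of this header):
 * how a caller might interpret a failure result from table_tuple_delete()
 * together with the TM_FailureData it fills in.  The error texts are
 * examples, not messages emitted by in-tree callers.
 */
static inline void
example_report_modify_failure(TM_Result result, const TM_FailureData *tmfd)
{
	switch (result)
	{
		case TM_SelfModified:
			/* cmax is only valid for TM_SelfModified */
			elog(ERROR, "tuple already modified by this transaction (cmax: %u)",
				 tmfd->cmax);
			break;
		case TM_Updated:
			/* tmfd->ctid now points at the replacement tuple's location */
			elog(ERROR, "tuple concurrently updated by transaction %u",
				 tmfd->xmax);
			break;
		case TM_Deleted:
			elog(ERROR, "tuple concurrently deleted by transaction %u",
				 tmfd->xmax);
			break;
		default:
			break;
	}
}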
| |
| /* |
| * State used when calling table_index_delete_tuples(). |
| * |
| * Represents the status of table tuples, referenced by table TID and taken by |
| * index AM from index tuples. State consists of high level parameters of the |
| * deletion operation, plus two mutable palloc()'d arrays for information |
| * about the status of individual table tuples. These are conceptually one |
| * single array. Using two arrays keeps the TM_IndexDelete struct small, |
| * which makes sorting the first array (the deltids array) fast. |
| * |
| * Some index AM callers perform simple index tuple deletion (by specifying |
| * bottomup = false), and include only known-dead deltids. These known-dead |
| * entries are all marked knowndeletable = true directly (typically these are |
| * TIDs from LP_DEAD-marked index tuples), but that isn't strictly required. |
| * |
| * Callers that specify bottomup = true are "bottom-up index deletion" |
| * callers. The considerations for the tableam are more subtle with these |
| * callers because they ask the tableam to perform highly speculative work, |
| * and might only expect the tableam to check a small fraction of all entries. |
| * Caller is not allowed to specify knowndeletable = true for any entry |
| * because everything is highly speculative. Bottom-up caller provides |
| * context and hints to tableam -- see comments below for details on how index |
| * AMs and tableams should coordinate during bottom-up index deletion. |
| * |
| * Simple index deletion callers may ask the tableam to perform speculative |
| * work, too. This is a little like bottom-up deletion, but not too much. |
| * The tableam will only perform speculative work when it's practically free |
| * to do so in passing for simple deletion caller (while always performing |
| * whatever work is needed to enable knowndeletable/LP_DEAD index tuples to |
| * be deleted within index AM). This is the real reason why it's possible for |
| * simple index deletion caller to specify knowndeletable = false up front |
| * (this means "check if it's possible for me to delete corresponding index |
| * tuple when it's cheap to do so in passing"). The index AM should only |
| * include "extra" entries for index tuples whose TIDs point to a table block |
| * that tableam is expected to have to visit anyway (in the event of a block |
| * orientated tableam). The tableam isn't strictly obligated to check these |
| * "extra" TIDs, but a block-based AM should always manage to do so in |
| * practice. |
| * |
| * The final contents of the deltids/status arrays are interesting to callers |
| * that ask tableam to perform speculative work (i.e. when _any_ items have |
| * knowndeletable set to false up front). These index AM callers will |
| * naturally need to consult final state to determine which index tuples are |
| * in fact deletable. |
| * |
| * The index AM can keep track of which index tuple relates to which deltid by |
| * setting idxoffnum (and/or relying on each entry being uniquely identifiable |
| * using tid), which is important when the final contents of the array will |
| * need to be interpreted -- the array can shrink from initial size after |
| * tableam processing and/or have entries in a new order (tableam may sort |
| * deltids array for its own reasons). Bottom-up callers may find that final |
| * ndeltids is 0 on return from call to tableam, in which case no index tuple |
| * deletions are possible. Simple deletion callers can rely on any entries |
| * they know to be deletable appearing in the final array as deletable. |
| */ |
| typedef struct TM_IndexDelete |
| { |
| ItemPointerData tid; /* table TID from index tuple */ |
| int16 id; /* Offset into TM_IndexStatus array */ |
| } TM_IndexDelete; |
| |
| typedef struct TM_IndexStatus |
| { |
| OffsetNumber idxoffnum; /* Index am page offset number */ |
| bool knowndeletable; /* Currently known to be deletable? */ |
| |
| /* Bottom-up index deletion specific fields follow */ |
| bool promising; /* Promising (duplicate) index tuple? */ |
| int16 freespace; /* Space freed in index if deleted */ |
| } TM_IndexStatus; |
| |
| /* |
| * Index AM/tableam coordination is central to the design of bottom-up index |
| * deletion. The index AM provides hints about where to look to the tableam |
| * by marking some entries as "promising". Index AM does this with duplicate |
| * index tuples that are strongly suspected to be old versions left behind by |
| * UPDATEs that did not logically modify indexed values. Index AM may find it |
| * helpful to only mark entries as promising when they're thought to have been |
| * affected by such an UPDATE in the recent past. |
| * |
| * Bottom-up index deletion casts a wide net at first, usually by including |
| * all TIDs on a target index page. It is up to the tableam to worry about |
| * the cost of checking transaction status information. The tableam is in |
| * control, but needs careful guidance from the index AM. Index AM requests |
| * that bottomupfreespace target be met, while tableam measures progress |
| * towards that goal by tallying the per-entry freespace value for known |
| * deletable entries. (All !bottomup callers can just set these space related |
| * fields to zero.) |
| */ |
| typedef struct TM_IndexDeleteOp |
| { |
| Relation irel; /* Target index relation */ |
| BlockNumber iblknum; /* Index block number (for error reports) */ |
| bool bottomup; /* Bottom-up (not simple) deletion? */ |
| int bottomupfreespace; /* Bottom-up space target */ |
| |
| /* Mutable per-TID information follows (index AM initializes entries) */ |
| int ndeltids; /* Current # of deltids/status elements */ |
| TM_IndexDelete *deltids; |
| TM_IndexStatus *status; |
| } TM_IndexDeleteOp; |
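
/*
 * Illustrative sketch only: how an index AM might set up a simple
 * (non-bottom-up) deletion request for up to `ndead` known-dead TIDs before
 * calling table_index_delete_tuples().  All names local to this function
 * are hypothetical.
 */
static inline void
example_init_simple_delstate(TM_IndexDeleteOp *delstate, Relation irel,
							 BlockNumber iblknum, int ndead)
{
	delstate->irel = irel;
	delstate->iblknum = iblknum;
	delstate->bottomup = false;
	delstate->bottomupfreespace = 0;	/* only used by bottom-up callers */
	delstate->ndeltids = 0;		/* caller increments as entries are added */
	delstate->deltids = (TM_IndexDelete *) palloc(ndead * sizeof(TM_IndexDelete));
	delstate->status = (TM_IndexStatus *) palloc(ndead * sizeof(TM_IndexStatus));

	/*
	 * The index AM then fills deltids[i].tid/id and status[i].idxoffnum for
	 * each entry, marks known-dead entries with knowndeletable = true, and
	 * zeroes the bottom-up only fields (promising, freespace).
	 */
}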
| |
| /* "options" flag bits for table_tuple_insert */ |
| /* TABLE_INSERT_SKIP_WAL was 0x0001; RelationNeedsWAL() now governs */ |
| #define TABLE_INSERT_SKIP_FSM 0x0002 |
| #define TABLE_INSERT_FROZEN 0x0004 |
| #define TABLE_INSERT_NO_LOGICAL 0x0008 |
| |
| /* flag bits for table_tuple_lock */ |
| /* Follow tuples whose update is in progress if lock modes don't conflict */ |
| #define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS (1 << 0) |
| /* Follow update chain and lock latest version of tuple */ |
| #define TUPLE_LOCK_FLAG_FIND_LAST_VERSION (1 << 1) |
| |
| |
/*
 * Typedef for callback function for table_index_build_scan.
 *
 * GPDB: This takes an ItemPointer, rather than a HeapTuple, because it is
 * also used with AO/AOCO tables.
 */
| typedef void (*IndexBuildCallback) (Relation index, |
| ItemPointer tid, |
| Datum *values, |
| bool *isnull, |
| bool tupleIsAlive, |
| void *state); |
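
/*
 * Illustrative sketch only: a trivial IndexBuildCallback that counts the
 * live tuples seen during table_index_build_scan().  The state struct is
 * hypothetical; a real index AM would collect index entries here.
 */
typedef struct ExampleBuildState
{
	double		tupcount;		/* live tuples seen so far */
} ExampleBuildState;

static inline void
example_build_callback(Relation index, ItemPointer tid, Datum *values,
					   bool *isnull, bool tupleIsAlive, void *state)
{
	ExampleBuildState *bstate = (ExampleBuildState *) state;

	if (tupleIsAlive)
		bstate->tupcount += 1;
}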
| |
| /* |
| * CBDB: Execution structure shouldn't appear here in PG design. We need to |
| * tell the scan node to do low-level and efficient filtering. The scan node |
| * will use low-level APIs exposed by the storage. |
| */ |
| struct PlanState; |
| |
| typedef bytea *(*tamoptions_function)(Datum reloptions, |
| char relkind, |
| bool validate); |
| |
| /* |
| * GPDB: Tables in GPDB can be thought of a set of BlockSequences, each of which |
| * is a monotonically increasing sequence of tids. The tids may or may not be |
| * contiguous. For eg. in AO/CO tables, row number allocations are batched for |
| * each insert(-like) command and the row numbers may have holes. |
| * |
| * Furthermore, each sequence of tids is represented in terms of BlockNumbers, |
| * for brevity. The BlockNumbers inside a BlockSequence are contiguous. |
| * BlockSequences do not overlap with each other and need not be contiguous |
| * between themselves. |
| * |
| * This generalization helps to unify code for heap vs AO/CO tables (eg. BRIN). |
| * |
| * Heap tables can be thought of as having only one BlockSequence having range: |
| * [0,num_blocks]. AO/CO tables will have one such sequence for every segno: |
| * [0,blocks_seg0], [33554432,33554432+blocks_seg1], [67108864,67108864+blocks_seg2], ... |
| * where blocks_segi is the number of heap blocks represented in that segment. |
| * |
| * To be clear, this does NOT mean that there must be (blocks_segi * BLCKSZ) |
| * number of bytes in that segment. Please refer to how AOTupleIds can be |
| * treated as ItemPointers. |
| */ |
| typedef struct BlockSequence |
| { |
| BlockNumber startblknum; /* starting block number in sequence */ |
| BlockNumber nblocks; /* number of heap blocks represented in sequence */ |
| } BlockSequence; |
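
/*
 * Illustrative sketch only (a hypothetical helper): test whether a block
 * number falls within a given BlockSequence.
 */
static inline bool
example_block_in_sequence(const BlockSequence *seq, BlockNumber blkno)
{
	return blkno >= seq->startblknum &&
		blkno < seq->startblknum + seq->nblocks;
}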
| |
| /* |
| * API struct for a table AM. Note this must be allocated in a |
| * server-lifetime manner, typically as a static const struct, which then gets |
| * returned by FormData_pg_am.amhandler. |
| * |
| * In most cases it's not appropriate to call the callbacks directly, use the |
| * table_* wrapper functions instead. |
| * |
| * GetTableAmRoutine() asserts that required callbacks are filled in, remember |
| * to update when adding a callback. |
| */ |
| typedef struct TableAmRoutine |
| { |
| /* this must be set to T_TableAmRoutine */ |
| NodeTag type; |
| |
| |
| /* ------------------------------------------------------------------------ |
| * Slot related callbacks. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * Return slot implementation suitable for storing a tuple of this AM. |
| */ |
| const TupleTableSlotOps *(*slot_callbacks) (Relation rel); |
| |
| |
| /* ------------------------------------------------------------------------ |
| * Table scan callbacks. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * Start a scan of `rel`. The callback has to return a TableScanDesc, |
| * which will typically be embedded in a larger, AM specific, struct. |
| * |
| * If nkeys != 0, the results need to be filtered by those scan keys. |
| * |
| * pscan, if not NULL, will have already been initialized with |
| * parallelscan_initialize(), and has to be for the same relation. Will |
| * only be set coming from table_beginscan_parallel(). |
| * |
| * `flags` is a bitmask indicating the type of scan (ScanOptions's |
| * SO_TYPE_*, currently only one may be specified), options controlling |
| * the scan's behaviour (ScanOptions's SO_ALLOW_*, several may be |
| * specified, an AM may ignore unsupported ones) and whether the snapshot |
| * needs to be deallocated at scan_end (ScanOptions's SO_TEMP_SNAPSHOT). |
| */ |
| TableScanDesc (*scan_begin) (Relation rel, |
| Snapshot snapshot, |
| int nkeys, struct ScanKeyData *key, |
| ParallelTableScanDesc pscan, |
| uint32 flags); |
| |
| /* |
| * GPDB: Extract columns for scan from targetlist and quals. This is mainly |
| * for AOCS tables. |
| */ |
| TableScanDesc (*scan_begin_extractcolumns) (Relation rel, |
| Snapshot snapshot, |
| int nkeys, struct ScanKeyData *key, |
| ParallelTableScanDesc parallel_scan, |
| struct PlanState *ps, |
| uint32 flags); |
| |
| /* |
| * GPDB: Extract columns for scan from targetlist and quals, |
| * stored in key as struct ScanKeyData. This is mainly |
| * for AOCS tables. |
| */ |
| TableScanDesc (*scan_begin_extractcolumns_bm) (Relation rel, Snapshot snapshot, |
| List *targetList, List *quals, |
| List *bitmapqualorig, |
| uint32 flags); |
| /* |
| * Release resources and deallocate scan. If TableScanDesc.temp_snap, |
| * TableScanDesc.rs_snapshot needs to be unregistered. |
| */ |
| void (*scan_end) (TableScanDesc scan); |
| |
| /* |
| * Restart relation scan. If set_params is set to true, allow_{strat, |
| * sync, pagemode} (see scan_begin) changes should be taken into account. |
| */ |
| void (*scan_rescan) (TableScanDesc scan, struct ScanKeyData *key, |
| bool set_params, bool allow_strat, |
| bool allow_sync, bool allow_pagemode); |
| |
| /* |
| * Return next tuple from `scan`, store in slot. |
| */ |
| bool (*scan_getnextslot) (TableScanDesc scan, |
| ScanDirection direction, |
| TupleTableSlot *slot); |
| |
| /*----------- |
| * Optional functions to provide scanning for ranges of ItemPointers. |
| * Implementations must either provide both of these functions, or neither |
| * of them. |
| * |
| * Implementations of scan_set_tidrange must themselves handle |
	 * ItemPointers of any value. i.e., they must handle each of the following:
| * |
| * 1) mintid or maxtid is beyond the end of the table; and |
| * 2) mintid is above maxtid; and |
| * 3) item offset for mintid or maxtid is beyond the maximum offset |
| * allowed by the AM. |
| * |
| * Implementations can assume that scan_set_tidrange is always called |
| * before scan_getnextslot_tidrange or after scan_rescan and before any |
| * further calls to scan_getnextslot_tidrange. |
| */ |
| void (*scan_set_tidrange) (TableScanDesc scan, |
| ItemPointer mintid, |
| ItemPointer maxtid); |
| |
| /* |
| * Return next tuple from `scan` that's in the range of TIDs defined by |
| * scan_set_tidrange. |
| */ |
| bool (*scan_getnextslot_tidrange) (TableScanDesc scan, |
| ScanDirection direction, |
| TupleTableSlot *slot); |
| |
| /* |
| * This callback is used to indicate what the AM can do, what features the |
| * AM can support, return the flags represented the supported features of |
| * scan. |
| */ |
| uint32 (*scan_flags) (Relation rel); |
| |
| /* ------------------------------------------------------------------------ |
| * Parallel table scan related functions. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * Estimate the size of shared memory needed for a parallel scan of this |
| * relation. The snapshot does not need to be accounted for. |
| */ |
| Size (*parallelscan_estimate) (Relation rel); |
| |
| /* |
| * Initialize ParallelTableScanDesc for a parallel scan of this relation. |
| * `pscan` will be sized according to parallelscan_estimate() for the same |
| * relation. |
| */ |
| Size (*parallelscan_initialize) (Relation rel, |
| ParallelTableScanDesc pscan); |
| |
| /* |
| * Reinitialize `pscan` for a new scan. `rel` will be the same relation as |
| * when `pscan` was initialized by parallelscan_initialize. |
| */ |
| void (*parallelscan_reinitialize) (Relation rel, |
| ParallelTableScanDesc pscan); |
| |
| |
| /* ------------------------------------------------------------------------ |
| * Index Scan Callbacks |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * Prepare to fetch tuples from the relation, as needed when fetching |
| * tuples for an index scan. The callback has to return an |
| * IndexFetchTableData, which the AM will typically embed in a larger |
| * structure with additional information. |
| * |
| * Tuples for an index scan can then be fetched via index_fetch_tuple. |
| */ |
| struct IndexFetchTableData *(*index_fetch_begin) (Relation rel); |
| |
| /* |
| * Reset index fetch. Typically this will release cross index fetch |
| * resources held in IndexFetchTableData. |
| */ |
| void (*index_fetch_reset) (struct IndexFetchTableData *data); |
| |
| /* |
| * Release resources and deallocate index fetch. |
| */ |
| void (*index_fetch_end) (struct IndexFetchTableData *data); |
| |
| /* |
| * Fetch tuple at `tid` into `slot`, after doing a visibility test |
| * according to `snapshot`. If a tuple was found and passed the visibility |
| * test, return true, false otherwise. |
| * |
| * Note that AMs that do not necessarily update indexes when indexed |
| * columns do not change, need to return the current/correct version of |
| * the tuple that is visible to the snapshot, even if the tid points to an |
| * older version of the tuple. |
| * |
| * *call_again is false on the first call to index_fetch_tuple for a tid. |
| * If there potentially is another tuple matching the tid, *call_again |
| * needs to be set to true by index_fetch_tuple, signaling to the caller |
| * that index_fetch_tuple should be called again for the same tid. |
| * |
| * *all_dead, if all_dead is not NULL, should be set to true by |
| * index_fetch_tuple iff it is guaranteed that no backend needs to see |
| * that tuple. Index AMs can use that to avoid returning that tid in |
| * future searches. |
| */ |
| bool (*index_fetch_tuple) (struct IndexFetchTableData *scan, |
| ItemPointer tid, |
| Snapshot snapshot, |
| TupleTableSlot *slot, |
| bool *call_again, bool *all_dead); |
| |
| /* See table_index_unique_check() for details */ |
| bool (*index_unique_check) (Relation rel, |
| ItemPointer tid, |
| Snapshot snapshot, |
| bool *all_dead); |
| |
| /* ------------------------------------------------------------------------ |
| * Callbacks for non-modifying operations on individual tuples |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * Fetch tuple at `tid` into `slot`, after doing a visibility test |
| * according to `snapshot`. If a tuple was found and passed the visibility |
| * test, returns true, false otherwise. |
| */ |
| bool (*tuple_fetch_row_version) (Relation rel, |
| ItemPointer tid, |
| Snapshot snapshot, |
| TupleTableSlot *slot); |
| |
| /* |
| * Is tid valid for a scan of this relation. |
| */ |
| bool (*tuple_tid_valid) (TableScanDesc scan, |
| ItemPointer tid); |
| |
| /* |
| * Return the latest version of the tuple at `tid`, by updating `tid` to |
| * point at the newest version. |
| */ |
| void (*tuple_get_latest_tid) (TableScanDesc scan, |
| ItemPointer tid); |
| |
| /* |
| * Does the tuple in `slot` satisfy `snapshot`? The slot needs to be of |
| * the appropriate type for the AM. |
| */ |
| bool (*tuple_satisfies_snapshot) (Relation rel, |
| TupleTableSlot *slot, |
| Snapshot snapshot); |
| |
| /* see table_index_delete_tuples() */ |
| TransactionId (*index_delete_tuples) (Relation rel, |
| TM_IndexDeleteOp *delstate); |
| |
| |
| /* ------------------------------------------------------------------------ |
| * Manipulations of physical tuples. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* see table_tuple_insert() for reference about parameters */ |
| void (*tuple_insert) (Relation rel, TupleTableSlot *slot, |
| CommandId cid, int options, |
| struct BulkInsertStateData *bistate); |
| |
| /* see table_tuple_insert_speculative() for reference about parameters */ |
| void (*tuple_insert_speculative) (Relation rel, |
| TupleTableSlot *slot, |
| CommandId cid, |
| int options, |
| struct BulkInsertStateData *bistate, |
| uint32 specToken); |
| |
| /* see table_tuple_complete_speculative() for reference about parameters */ |
| void (*tuple_complete_speculative) (Relation rel, |
| TupleTableSlot *slot, |
| uint32 specToken, |
| bool succeeded); |
| |
| /* see table_multi_insert() for reference about parameters */ |
| void (*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots, |
| CommandId cid, int options, struct BulkInsertStateData *bistate); |
| |
| /* see table_tuple_delete() for reference about parameters */ |
| TM_Result (*tuple_delete) (Relation rel, |
| ItemPointer tid, |
| CommandId cid, |
| Snapshot snapshot, |
| Snapshot crosscheck, |
| bool wait, |
| TM_FailureData *tmfd, |
| bool changingPart); |
| |
| /* see table_tuple_update() for reference about parameters */ |
| TM_Result (*tuple_update) (Relation rel, |
| ItemPointer otid, |
| TupleTableSlot *slot, |
| CommandId cid, |
| Snapshot snapshot, |
| Snapshot crosscheck, |
| bool wait, |
| TM_FailureData *tmfd, |
| LockTupleMode *lockmode, |
| TU_UpdateIndexes *update_indexes); |
| |
| /* see table_tuple_lock() for reference about parameters */ |
| TM_Result (*tuple_lock) (Relation rel, |
| ItemPointer tid, |
| Snapshot snapshot, |
| TupleTableSlot *slot, |
| CommandId cid, |
| LockTupleMode mode, |
| LockWaitPolicy wait_policy, |
| uint8 flags, |
| TM_FailureData *tmfd); |
| |
| /* |
| * Perform operations necessary to complete insertions made via |
| * tuple_insert and multi_insert with a BulkInsertState specified. In-tree |
| * access methods ceased to use this. |
| * |
| * Typically callers of tuple_insert and multi_insert will just pass all |
| * the flags that apply to them, and each AM has to decide which of them |
| * make sense for it, and then only take actions in finish_bulk_insert for |
| * those flags, and ignore others. |
| * |
| * Optional callback. |
| */ |
| void (*finish_bulk_insert) (Relation rel, int options); |
| |
| /* ------------------------------------------------------------------------ |
| * DDL related functionality. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * This callback needs to create new relation storage for `rel`, with |
| * appropriate durability behaviour for `persistence`. |
| * |
| * Note that only the subset of the relcache filled by |
| * RelationBuildLocalRelation() can be relied upon and that the relation's |
| * catalog entries will either not yet exist (new relation), or will still |
| * reference the old relfilelocator. |
| * |
| * As output *freezeXid, *minmulti must be set to the values appropriate |
| * for pg_class.{relfrozenxid, relminmxid}. For AMs that don't need those |
| * fields to be filled they can be set to InvalidTransactionId and |
| * InvalidMultiXactId, respectively. |
| * |
| * See also table_relation_set_new_filelocator(). |
| */ |
| void (*relation_set_new_filelocator) (Relation rel, |
| const RelFileLocator *newrlocator, |
| char persistence, |
| TransactionId *freezeXid, |
| MultiXactId *minmulti); |
| |
| /* |
| * This callback needs to remove all contents from `rel`'s current |
| * relfilelocator. No provisions for transactional behaviour need to be |
| * made. Often this can be implemented by truncating the underlying |
| * storage to its minimal size. |
| * |
| * See also table_relation_nontransactional_truncate(). |
| */ |
| void (*relation_nontransactional_truncate) (Relation rel); |
| |
| /* |
| * See table_relation_copy_data(). |
| * |
| * This can typically be implemented by directly copying the underlying |
| * storage, unless it contains references to the tablespace internally. |
| */ |
| void (*relation_copy_data) (Relation rel, |
| const RelFileLocator *newrlocator); |
| |
| /* See table_relation_copy_for_cluster() */ |
| void (*relation_copy_for_cluster) (Relation OldTable, |
| Relation NewTable, |
| Relation OldIndex, |
| bool use_sort, |
| TransactionId OldestXmin, |
| TransactionId *xid_cutoff, |
| MultiXactId *multi_cutoff, |
| double *num_tuples, |
| double *tups_vacuumed, |
| double *tups_recently_dead); |
| |
| /* |
| * React to VACUUM command on the relation. The VACUUM can be triggered by |
| * a user or by autovacuum. The specific actions performed by the AM will |
| * depend heavily on the individual AM. |
| * |
| * On entry a transaction is already established, and the relation is |
| * locked with a ShareUpdateExclusive lock. |
| * |
| * Note that neither VACUUM FULL (and CLUSTER), nor ANALYZE go through |
| * this routine, even if (for ANALYZE) it is part of the same VACUUM |
| * command. |
| * |
| * There probably, in the future, needs to be a separate callback to |
| * integrate with autovacuum's scheduling. |
| */ |
| void (*relation_vacuum) (Relation rel, |
| struct VacuumParams *params, |
| BufferAccessStrategy bstrategy); |
| |
| /* |
| * Prepare to analyze block `blockno` of `scan`. The scan has been started |
| * with table_beginscan_analyze(). See also |
| * table_scan_analyze_next_block(). |
| * |
| * The callback may acquire resources like locks that are held until |
| * table_scan_analyze_next_tuple() returns false. It e.g. can make sense |
| * to hold a lock until all tuples on a block have been analyzed by |
| * scan_analyze_next_tuple. |
| * |
| * The callback can return false if the block is not suitable for |
| * sampling, e.g. because it's a metapage that could never contain tuples. |
| * |
| * XXX: This obviously is primarily suited for block-based AMs. It's not |
| * clear what a good interface for non block based AMs would be, so there |
| * isn't one yet. |
| */ |
| bool (*scan_analyze_next_block) (TableScanDesc scan, |
| BlockNumber blockno, |
| BufferAccessStrategy bstrategy); |
| |
| /* |
| * See table_scan_analyze_next_tuple(). |
| * |
| * Not every AM might have a meaningful concept of dead rows, in which |
| * case it's OK to not increment *deadrows - but note that that may |
| * influence autovacuum scheduling (see comment for relation_vacuum |
| * callback). |
| */ |
| bool (*scan_analyze_next_tuple) (TableScanDesc scan, |
| TransactionId OldestXmin, |
| double *liverows, |
| double *deadrows, |
| TupleTableSlot *slot); |
| |
| /* see table_index_build_range_scan for reference about parameters */ |
| double (*index_build_range_scan) (Relation table_rel, |
| Relation index_rel, |
| struct IndexInfo *index_info, |
| bool allow_sync, |
| bool anyvisible, |
| bool progress, |
| BlockNumber start_blockno, |
| BlockNumber numblocks, |
| IndexBuildCallback callback, |
| void *callback_state, |
| TableScanDesc scan); |
| |
| /* see table_index_validate_scan for reference about parameters */ |
| void (*index_validate_scan) (Relation table_rel, |
| Relation index_rel, |
| struct IndexInfo *index_info, |
| Snapshot snapshot, |
| struct ValidateIndexState *state); |
| |
| |
| /* ------------------------------------------------------------------------ |
| * Miscellaneous functions. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * See table_relation_size(). |
| * |
| * Note that currently a few callers use the MAIN_FORKNUM size to figure |
| * out the range of potentially interesting blocks (brin, analyze). It's |
| * probable that we'll need to revise the interface for those at some |
| * point. |
| */ |
| uint64 (*relation_size) (Relation rel, ForkNumber forkNumber); |
| |
| /* |
| * See table_relation_get_block_sequences() |
| */ |
| BlockSequence *(*relation_get_block_sequences) (Relation rel, |
| int *numSequences); |
| |
| /* |
| * See table_relation_get_block_sequence() |
| */ |
| void (*relation_get_block_sequence) (Relation rel, |
| BlockNumber blkNum, |
| BlockSequence *sequence); |
| |
| /* |
| * This callback should return true if the relation requires a TOAST table |
| * and false if it does not. It may wish to examine the relation's tuple |
| * descriptor before making a decision, but if it uses some other method |
| * of storing large values (or if it does not support them) it can simply |
| * return false. |
| */ |
| bool (*relation_needs_toast_table) (Relation rel); |
| |
| /* |
| * This callback should return the OID of the table AM that implements |
| * TOAST tables for this AM. If the relation_needs_toast_table callback |
| * always returns false, this callback is not required. |
| */ |
| Oid (*relation_toast_am) (Relation rel); |
| |
| /* |
| * This callback is invoked when detoasting a value stored in a toast |
| * table implemented by this AM. See table_relation_fetch_toast_slice() |
| * for more details. |
| */ |
| void (*relation_fetch_toast_slice) (Relation toastrel, Oid valueid, |
| int32 attrsize, |
| int32 sliceoffset, |
| int32 slicelength, |
| struct varlena *result); |
| |
| |
| /* ------------------------------------------------------------------------ |
| * Planner related functions. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * See table_relation_estimate_size(). |
| * |
| * While block oriented, it shouldn't be too hard for an AM that doesn't |
| * internally use blocks to convert into a usable representation. |
| * |
| * This differs from the relation_size callback by returning size |
| * estimates (both relation size and tuple count) for planning purposes, |
	 * rather than returning a currently correct value.
| */ |
| void (*relation_estimate_size) (Relation rel, int32 *attr_widths, |
| BlockNumber *pages, double *tuples, |
| double *allvisfrac); |
| |
| |
| /* ------------------------------------------------------------------------ |
| * Executor related functions. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * Prepare to fetch / check / return tuples from `tbmres->blockno` as part |
| * of a bitmap table scan. `scan` was started via table_beginscan_bm(). |
| * Return false if there are no tuples to be found on the page, true |
| * otherwise. |
| * |
| * This will typically read and pin the target block, and do the necessary |
| * work to allow scan_bitmap_next_tuple() to return tuples (e.g. it might |
| * make sense to perform tuple visibility checks at this time). For some |
| * AMs it will make more sense to do all the work referencing `tbmres` |
| * contents here, for others it might be better to defer more work to |
| * scan_bitmap_next_tuple. |
| * |
	 * If `tbmres->ntuples` is -1, this is a lossy scan and all visible tuples
| * on the page have to be returned, otherwise the tuples at offsets in |
| * `tbmres->offsets` need to be returned. |
| * |
| * XXX: Currently this may only be implemented if the AM uses md.c as its |
| * storage manager, and uses ItemPointer->ip_blkid in a manner that maps |
| * blockids directly to the underlying storage. nodeBitmapHeapscan.c |
| * performs prefetching directly using that interface. This probably |
| * needs to be rectified at a later point. |
| * |
| * XXX: Currently this may only be implemented if the AM uses the |
| * visibilitymap, as nodeBitmapHeapscan.c unconditionally accesses it to |
| * perform prefetching. This probably needs to be rectified at a later |
| * point. |
| * |
| * Optional callback, but either both scan_bitmap_next_block and |
| * scan_bitmap_next_tuple need to exist, or neither. |
| */ |
| bool (*scan_bitmap_next_block) (TableScanDesc scan, |
| struct TBMIterateResult *tbmres); |
| |
| /* |
| * Fetch the next tuple of a bitmap table scan into `slot` and return true |
| * if a visible tuple was found, false otherwise. |
| * |
| * For some AMs it will make more sense to do all the work referencing |
| * `tbmres` contents in scan_bitmap_next_block, for others it might be |
| * better to defer more work to this callback. |
| * |
| * Optional callback, but either both scan_bitmap_next_block and |
| * scan_bitmap_next_tuple need to exist, or neither. |
| */ |
| bool (*scan_bitmap_next_tuple) (TableScanDesc scan, |
| struct TBMIterateResult *tbmres, |
| TupleTableSlot *slot); |
| |
| /* |
| * Prepare to fetch tuples from the next block in a sample scan. Return |
| * false if the sample scan is finished, true otherwise. `scan` was |
| * started via table_beginscan_sampling(). |
| * |
| * Typically this will first determine the target block by calling the |
| * TsmRoutine's NextSampleBlock() callback if not NULL, or alternatively |
| * perform a sequential scan over all blocks. The determined block is |
| * then typically read and pinned. |
| * |
| * As the TsmRoutine interface is block based, a block needs to be passed |
| * to NextSampleBlock(). If that's not appropriate for an AM, it |
| * internally needs to perform mapping between the internal and a block |
| * based representation. |
| * |
| * Note that it's not acceptable to hold deadlock prone resources such as |
| * lwlocks until scan_sample_next_tuple() has exhausted the tuples on the |
| * block - the tuple is likely to be returned to an upper query node, and |
| * the next call could be off a long while. Holding buffer pins and such |
| * is obviously OK. |
| * |
| * Currently it is required to implement this interface, as there's no |
| * alternative way (contrary e.g. to bitmap scans) to implement sample |
| * scans. If infeasible to implement, the AM may raise an error. |
| */ |
| bool (*scan_sample_next_block) (TableScanDesc scan, |
| struct SampleScanState *scanstate); |
| |
| /* |
| * This callback, only called after scan_sample_next_block has returned |
| * true, should determine the next tuple to be returned from the selected |
| * block using the TsmRoutine's NextSampleTuple() callback. |
| * |
| * The callback needs to perform visibility checks, and only return |
| * visible tuples. That obviously can mean calling NextSampleTuple() |
| * multiple times. |
| * |
| * The TsmRoutine interface assumes that there's a maximum offset on a |
| * given page, so if that doesn't apply to an AM, it needs to emulate that |
| * assumption somehow. |
| */ |
| bool (*scan_sample_next_tuple) (TableScanDesc scan, |
| struct SampleScanState *scanstate, |
| TupleTableSlot *slot); |
| |
| /* |
| * Like scan_sample_next_block and scan_sample_next_tuple, this callback |
| * is also used to sample tuple rows. As different storage maybe need to |
| * use different acquire sample rows process, we extend an interface to |
| * achieve this requirement. |
| */ |
| int (*acquire_sample_rows) (Relation onerel, int elevel, |
| HeapTuple *rows, int targrows, |
| double *totalrows, double *totaldeadrows); |
| /* |
| * This callback is used to parse reloptions for relation/matview/toast. |
| */ |
| bytea *(*amoptions)(Datum reloptions, char relkind, bool validate); |
| |
| /* |
| * This callback is used to swap internal auxiliary relation if |
| * the table AM use different layout structure to organize tuples. |
| * |
| * Standard heap relation will swap the heap relation itself and optional |
| * toast relation. Custom table AM could have different data structure, |
| * like AO-row/AO-col tables have several auxiliary relations. |
| * This behavior is table AM-specific. |
| */ |
| void (*swap_relation_files) (Oid relid1, Oid relid2, TransactionId frozenXid, MultiXactId cutoffMulti); |
| |
| void (*validate_column_encoding_clauses) (List *encoding_clause_opts); |
| |
| List * (*transform_column_encoding_clauses) (Relation, List *encoding_opts, bool validate, bool optionFromType); |
| |
| } TableAmRoutine; |
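
/*
 * A minimal sketch of the typical shape of a table AM handler, for
 * orientation only.  The routine must be allocated with server lifetime,
 * typically as a static const struct, and returned by the function recorded
 * in pg_am.amhandler.  The "example" names are hypothetical; see
 * heapam_handler.c for the in-tree implementation.  (Shown as a comment
 * because it requires fmgr.h and real callback implementations.)
 *
 *	static const TableAmRoutine example_methods = {
 *		.type = T_TableAmRoutine,
 *		.slot_callbacks = example_slot_callbacks,
 *		.scan_begin = example_beginscan,
 *		.scan_end = example_endscan,
 *		... remaining required callbacks ...
 *	};
 *
 *	Datum
 *	example_tableam_handler(PG_FUNCTION_ARGS)
 *	{
 *		PG_RETURN_POINTER(&example_methods);
 *	}
 */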
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Slot functions. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Returns slot callbacks suitable for holding tuples of the appropriate type |
| * for the relation. Works for tables, views, foreign tables and partitioned |
| * tables. |
| */ |
| extern const TupleTableSlotOps *table_slot_callbacks(Relation relation); |
| |
| /* |
| * Returns slot using the callbacks returned by table_slot_callbacks(), and |
| * registers it on *reglist. |
| */ |
| extern TupleTableSlot *table_slot_create(Relation relation, List **reglist); |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Table scan functions. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Start a scan of `rel`. Returned tuples pass a visibility test of |
| * `snapshot`, and if nkeys != 0, the results are filtered by those scan keys. |
| */ |
| static inline TableScanDesc |
| table_beginscan(Relation rel, Snapshot snapshot, |
| int nkeys, struct ScanKeyData *key) |
| { |
| uint32 flags = SO_TYPE_SEQSCAN | |
| SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; |
| |
| return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags); |
| } |
| |
| /* |
| * GPDB: Like table_beginscan(), but first attempt to create a |
| * scan key array from the targetList and the quals if the corresponding method |
| * is implemented. This is an optimization needed for AOCO relations. |
| * Otherwise, it is equivalent as passing the last two arguments as, 0, NULL. |
| * Like table_beginscan_parallel, it will be parallel mode if parallel_scan is not NULL. |
| */ |
| static inline TableScanDesc |
| table_beginscan_es(Relation relation, Snapshot snapshot, |
| int nkeys, struct ScanKeyData *key, |
| ParallelTableScanDesc parallel_scan, |
| struct PlanState *ps) |
| { |
| bool isParallel = parallel_scan != NULL; |
| uint32 flags = SO_TYPE_SEQSCAN | |
| SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; |
| |
| /* reset snapshot if in parallel mode */ |
| if (isParallel) |
| { |
| Assert(RelationGetRelid(relation) == parallel_scan->phs_relid); |
		if (!parallel_scan->phs_snapshot_any)
		{
| /* Snapshot was serialized -- restore it */ |
| snapshot = RestoreSnapshot((char *) parallel_scan + |
| parallel_scan->phs_snapshot_off); |
| RegisterSnapshot(snapshot); |
| flags |= SO_TEMP_SNAPSHOT; |
| } |
| else |
| { |
| /* SnapshotAny passed by caller (not serialized) */ |
| snapshot = SnapshotAny; |
| } |
| } |
| |
| if (relation->rd_tableam->scan_begin_extractcolumns) |
| return relation->rd_tableam->scan_begin_extractcolumns(relation, snapshot, |
| nkeys, key, |
| parallel_scan, |
| ps, flags); |
| |
| return relation->rd_tableam->scan_begin(relation, snapshot, |
| nkeys, key, |
| parallel_scan, flags); |
| } |
| |
| /* |
| * Like table_beginscan(), but for scanning catalog. It'll automatically use a |
| * snapshot appropriate for scanning catalog relations. |
| */ |
| extern TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, |
| struct ScanKeyData *key); |
| |
| /* |
| * Like table_beginscan(), but table_beginscan_strat() offers an extended API |
| * that lets the caller control whether a nondefault buffer access strategy |
| * can be used, and whether syncscan can be chosen (possibly resulting in the |
| * scan not starting from block zero). Both of these default to true with |
| * plain table_beginscan. |
| */ |
| static inline TableScanDesc |
| table_beginscan_strat(Relation rel, Snapshot snapshot, |
| int nkeys, struct ScanKeyData *key, |
| bool allow_strat, bool allow_sync) |
| { |
| uint32 flags = SO_TYPE_SEQSCAN | SO_ALLOW_PAGEMODE; |
| |
| if (allow_strat) |
| flags |= SO_ALLOW_STRAT; |
| if (allow_sync) |
| flags |= SO_ALLOW_SYNC; |
| |
| return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags); |
| } |
| |
| /* |
| * table_beginscan_bm is an alternative entry point for setting up a |
| * TableScanDesc for a bitmap heap scan. Although that scan technology is |
| * really quite unlike a standard seqscan, there is just enough commonality to |
| * make it worth using the same data structure. |
| */ |
| static inline TableScanDesc |
| table_beginscan_bm(Relation rel, Snapshot snapshot, |
| int nkeys, struct ScanKeyData *key) |
| { |
| uint32 flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE; |
| |
| return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags); |
| } |
| |
| /* |
| * GPDB: Like table_beginscan_bm but with extended information in |
| * order to extract columns and set up all the needed state for AOCO relations. |
| * In case that the access method does not implement the extract function, this |
| * defaults to table_beginscan_bm with nkeys and key set to 0 and NULL |
| * respectively. |
| */ |
| static inline TableScanDesc |
| table_beginscan_bm_ecs(Relation rel, Snapshot snapshot, |
| List *targetList, List *quals, List *bitmapqualorig) |
| { |
| uint32 flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE; |
| |
| if (rel->rd_tableam->scan_begin_extractcolumns_bm) |
| return rel->rd_tableam->scan_begin_extractcolumns_bm(rel, snapshot, |
| targetList, quals, |
| bitmapqualorig, |
| flags); |
| |
| return rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags); |
| } |
| |
| /* |
| * table_beginscan_sampling is an alternative entry point for setting up a |
| * TableScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth |
| * using the same data structure although the behavior is rather different. |
| * In addition to the options offered by table_beginscan_strat, this call |
| * also allows control of whether page-mode visibility checking is used. |
| */ |
| static inline TableScanDesc |
| table_beginscan_sampling(Relation rel, Snapshot snapshot, |
| int nkeys, struct ScanKeyData *key, |
| bool allow_strat, bool allow_sync, |
| bool allow_pagemode) |
| { |
| uint32 flags = SO_TYPE_SAMPLESCAN; |
| |
| if (allow_strat) |
| flags |= SO_ALLOW_STRAT; |
| if (allow_sync) |
| flags |= SO_ALLOW_SYNC; |
| if (allow_pagemode) |
| flags |= SO_ALLOW_PAGEMODE; |
| |
| return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags); |
| } |
| |
| /* |
| * table_beginscan_tid is an alternative entry point for setting up a |
| * TableScanDesc for a Tid scan. As with bitmap scans, it's worth using |
| * the same data structure although the behavior is rather different. |
| */ |
| static inline TableScanDesc |
| table_beginscan_tid(Relation rel, Snapshot snapshot) |
| { |
| uint32 flags = SO_TYPE_TIDSCAN; |
| |
| return rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags); |
| } |
| |
| /* |
| * table_beginscan_analyze is an alternative entry point for setting up a |
| * TableScanDesc for an ANALYZE scan. As with bitmap scans, it's worth using |
| * the same data structure although the behavior is rather different. |
| */ |
| static inline TableScanDesc |
| table_beginscan_analyze(Relation rel) |
| { |
| uint32 flags = SO_TYPE_ANALYZE; |
| |
| return rel->rd_tableam->scan_begin(rel, NULL, 0, NULL, NULL, flags); |
| } |
| |
| /* |
| * End relation scan. |
| */ |
| static inline void |
| table_endscan(TableScanDesc scan) |
| { |
| scan->rs_rd->rd_tableam->scan_end(scan); |
| } |
| |
| /* |
| * Restart a relation scan. |
| */ |
| static inline void |
| table_rescan(TableScanDesc scan, |
| struct ScanKeyData *key) |
| { |
| scan->rs_rd->rd_tableam->scan_rescan(scan, key, false, false, false, false); |
| } |
| |
| /* |
| * Restart a relation scan after changing params. |
| * |
| * This call allows changing the buffer strategy, syncscan, and pagemode |
| * options before starting a fresh scan. Note that although the actual use of |
| * syncscan might change (effectively, enabling or disabling reporting), the |
| * previously selected startblock will be kept. |
| */ |
| static inline void |
| table_rescan_set_params(TableScanDesc scan, struct ScanKeyData *key, |
| bool allow_strat, bool allow_sync, bool allow_pagemode) |
| { |
| scan->rs_rd->rd_tableam->scan_rescan(scan, key, true, |
| allow_strat, allow_sync, |
| allow_pagemode); |
| } |
| |
| /* |
| * Update snapshot used by the scan. |
| */ |
| extern void table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot); |
| |
| /* |
| * Return next tuple from `scan`, store in slot. |
| */ |
| static inline bool |
| table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) |
| { |
| slot->tts_tableOid = RelationGetRelid(sscan->rs_rd); |
| |
| /* We don't expect actual scans using NoMovementScanDirection */ |
| Assert(direction == ForwardScanDirection || |
| direction == BackwardScanDirection); |
| |
| /* |
| * We don't expect direct calls to table_scan_getnextslot with valid |
| * CheckXidAlive for catalog or regular tables. See detailed comments in |
| * xact.c where these variables are declared. |
| */ |
| if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) |
| elog(ERROR, "unexpected table_scan_getnextslot call during logical decoding"); |
| |
| return sscan->rs_rd->rd_tableam->scan_getnextslot(sscan, direction, slot); |
| } |
| |
| /* ---------------------------------------------------------------------------- |
| * TID Range scanning related functions. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * table_beginscan_tidrange is the entry point for setting up a TableScanDesc |
| * for a TID range scan. |
| */ |
| static inline TableScanDesc |
| table_beginscan_tidrange(Relation rel, Snapshot snapshot, |
| ItemPointer mintid, |
| ItemPointer maxtid) |
| { |
| TableScanDesc sscan; |
| uint32 flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE; |
| |
| sscan = rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags); |
| |
| /* Set the range of TIDs to scan */ |
| sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid); |
| |
| return sscan; |
| } |
| |
| /* |
| * table_rescan_tidrange resets the scan position and sets the minimum and |
| * maximum TID range to scan for a TableScanDesc created by |
| * table_beginscan_tidrange. |
| */ |
| static inline void |
| table_rescan_tidrange(TableScanDesc sscan, ItemPointer mintid, |
| ItemPointer maxtid) |
| { |
| /* Ensure table_beginscan_tidrange() was used. */ |
| Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0); |
| |
| sscan->rs_rd->rd_tableam->scan_rescan(sscan, NULL, false, false, false, false); |
| sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid); |
| } |
| |
| /* |
| * Fetch the next tuple from `sscan` for a TID range scan created by |
| * table_beginscan_tidrange(). Stores the tuple in `slot` and returns true, |
| * or returns false if no more tuples exist in the range. |
| */ |
| static inline bool |
| table_scan_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, |
| TupleTableSlot *slot) |
| { |
| /* Ensure table_beginscan_tidrange() was used. */ |
| Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0); |
| |
| /* We don't expect actual scans using NoMovementScanDirection */ |
| Assert(direction == ForwardScanDirection || |
| direction == BackwardScanDirection); |
| |
| return sscan->rs_rd->rd_tableam->scan_getnextslot_tidrange(sscan, |
| direction, |
| slot); |
| } |
| |
| /* |
| * Return the flags represented the supported features of table AM scan. |
| */ |
| static inline uint32 |
| table_scan_flags(Relation rel) |
| { |
| if (rel->rd_tableam->scan_flags) |
| return rel->rd_tableam->scan_flags(rel); |
| return 0; |
| } |
| |
| /* ---------------------------------------------------------------------------- |
| * Parallel table scan related functions. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Estimate the size of shared memory needed for a parallel scan of this |
| * relation. |
| */ |
| extern Size table_parallelscan_estimate(Relation rel, Snapshot snapshot); |
| |
| /* |
| * Initialize ParallelTableScanDesc for a parallel scan of this |
| * relation. `pscan` needs to be sized according to parallelscan_estimate() |
| * for the same relation. Call this just once in the leader process; then, |
| * individual workers attach via table_beginscan_parallel. |
| */ |
| extern void table_parallelscan_initialize(Relation rel, |
| ParallelTableScanDesc pscan, |
| Snapshot snapshot); |
| |
| /* |
| * Begin a parallel scan. `pscan` needs to have been initialized with |
| * table_parallelscan_initialize(), for the same relation. The initialization |
| * does not need to have happened in this backend. |
| * |
| * Caller must hold a suitable lock on the relation. |
| */ |
| extern TableScanDesc table_beginscan_parallel(Relation relation, |
| ParallelTableScanDesc pscan); |
| |
| /* |
| * Restart a parallel scan. Call this in the leader process. Caller is |
| * responsible for making sure that all workers have finished the scan |
| * beforehand. |
| */ |
| static inline void |
| table_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan) |
| { |
| rel->rd_tableam->parallelscan_reinitialize(rel, pscan); |
| } |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Index scan related functions. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Prepare to fetch tuples from the relation, as needed when fetching tuples |
| * for an index scan. |
| * |
| * Tuples for an index scan can then be fetched via table_index_fetch_tuple(). |
| */ |
| static inline IndexFetchTableData * |
| table_index_fetch_begin(Relation rel) |
| { |
| return rel->rd_tableam->index_fetch_begin(rel); |
| } |
| |
| /* |
| * Reset index fetch. Typically this will release cross index fetch resources |
| * held in IndexFetchTableData. |
| */ |
| static inline void |
| table_index_fetch_reset(struct IndexFetchTableData *scan) |
| { |
| scan->rel->rd_tableam->index_fetch_reset(scan); |
| } |
| |
| /* |
| * Release resources and deallocate index fetch. |
| */ |
| static inline void |
| table_index_fetch_end(struct IndexFetchTableData *scan) |
| { |
| scan->rel->rd_tableam->index_fetch_end(scan); |
| } |
| |
| /* |
| * Fetches, as part of an index scan, tuple at `tid` into `slot`, after doing |
| * a visibility test according to `snapshot`. If a tuple was found and passed |
| * the visibility test, returns true, false otherwise. Note that *tid may be |
| * modified when we return true (see later remarks on multiple row versions |
| * reachable via a single index entry). |
| * |
| * *call_again needs to be false on the first call to table_index_fetch_tuple() for |
| * a tid. If there potentially is another tuple matching the tid, *call_again |
| * will be set to true, signaling that table_index_fetch_tuple() should be called |
| * again for the same tid. |
| * |
| * *all_dead, if all_dead is not NULL, will be set to true by |
| * table_index_fetch_tuple() iff it is guaranteed that no backend needs to see |
| * that tuple. Index AMs can use that to avoid returning that tid in future |
| * searches. |
| * |
| * The difference between this function and table_tuple_fetch_row_version() |
| * is that this function returns the currently visible version of a row if |
| * the AM supports storing multiple row versions reachable via a single index |
| * entry (like heap's HOT). Whereas table_tuple_fetch_row_version() only |
 * evaluates the tuple exactly at `tid`. Outside of index entry -> table tuple
| * lookups, table_tuple_fetch_row_version() is what's usually needed. |
| */ |
| static inline bool |
| table_index_fetch_tuple(struct IndexFetchTableData *scan, |
| ItemPointer tid, |
| Snapshot snapshot, |
| TupleTableSlot *slot, |
| bool *call_again, bool *all_dead) |
| { |
| /* |
| * We don't expect direct calls to table_index_fetch_tuple with valid |
| * CheckXidAlive for catalog or regular tables. See detailed comments in |
| * xact.c where these variables are declared. |
| */ |
| if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) |
| elog(ERROR, "unexpected table_index_fetch_tuple call during logical decoding"); |
| |
| return scan->rel->rd_tableam->index_fetch_tuple(scan, tid, snapshot, |
| slot, call_again, |
| all_dead); |
| } |
| |
| /* |
| * This is a convenience wrapper around table_index_fetch_tuple() which |
| * returns whether there are table tuple items corresponding to an index |
| * entry. This likely is only useful to verify if there's a conflict in a |
| * unique index. |
| */ |
| extern bool table_index_fetch_tuple_check(Relation rel, |
| ItemPointer tid, |
| Snapshot snapshot, |
| bool *all_dead); |
| |
| /* |
| * GPDB: Check if a tuple exists for a given tid obtained from an index. |
| * This is used to entertain unique index checks on AO/CO tables. For heap |
| * tables, the regular method of beginindexscan..fetchtuple..endindexscan |
| * can be used. Creating/destroying scan descriptors for AO/CO tables are |
| * too expensive to be done on a per-tuple basis. |
| * |
| * This has to have an identical signature to table_index_fetch_tuple_check(). |
| */ |
| static inline bool |
| table_index_unique_check(Relation rel, |
| ItemPointer tid, |
| Snapshot snapshot, |
| bool *all_dead) |
| { |
| return rel->rd_tableam->index_unique_check(rel, tid, snapshot, |
| all_dead); |
| } |
| |
| /* ------------------------------------------------------------------------ |
| * Functions for non-modifying operations on individual tuples |
| * ------------------------------------------------------------------------ |
| */ |
| |
| |
| /* |
| * Fetch tuple at `tid` into `slot`, after doing a visibility test according to |
| * `snapshot`. If a tuple was found and passed the visibility test, returns |
| * true, false otherwise. |
| * |
| * See table_index_fetch_tuple's comment for the difference between these |
| * functions. It is correct to use this function outside of |
| * index-entry-to-table-tuple lookups. |
| */ |
| static inline bool |
| table_tuple_fetch_row_version(Relation rel, |
| ItemPointer tid, |
| Snapshot snapshot, |
| TupleTableSlot *slot) |
| { |
| /* |
| * We don't expect direct calls to table_tuple_fetch_row_version with |
| * valid CheckXidAlive for catalog or regular tables. See detailed |
| * comments in xact.c where these variables are declared. |
| */ |
| if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) |
| elog(ERROR, "unexpected table_tuple_fetch_row_version call during logical decoding"); |
| |
| return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot); |
| } |
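| |
| /* |
| * Illustrative sketch (hypothetical caller; `rel`, `tid` and `snapshot` are |
| * assumed to be set up already): fetch the exact row version at a known TID, |
| * e.g. one remembered from an earlier scan. |
| * |
| *     TupleTableSlot *slot = table_slot_create(rel, NULL); |
| * |
| *     if (table_tuple_fetch_row_version(rel, &tid, snapshot, slot)) |
| *         ... the version at `tid` is visible; use `slot` ... |
| * |
| *     ExecDropSingleTupleTableSlot(slot); |
| */ |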
| |
| /* |
| * Verify that `tid` is a potentially valid tuple identifier. That doesn't |
| * mean that the pointed-to row needs to exist or be visible, but that |
| * attempting to fetch the row (e.g. with table_tuple_get_latest_tid() or |
| * table_tuple_fetch_row_version()) should not error out if called with that |
| * tid. |
| * |
| * `scan` needs to have been started via table_beginscan(). |
| */ |
| static inline bool |
| table_tuple_tid_valid(TableScanDesc scan, ItemPointer tid) |
| { |
| return scan->rs_rd->rd_tableam->tuple_tid_valid(scan, tid); |
| } |
| |
| /* |
| * Return the latest version of the tuple at `tid`, by updating `tid` to |
| * point at the newest version. |
| */ |
| extern void table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid); |
| |
| /* |
| * Return true iff tuple in slot satisfies the snapshot. |
| * |
| * This assumes the slot's tuple is valid, and of the appropriate type for the |
| * AM. |
| * |
| * Some AMs might modify the data underlying the tuple as a side-effect. If so |
| * they ought to mark the relevant buffer dirty. |
| */ |
| static inline bool |
| table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, |
| Snapshot snapshot) |
| { |
| return rel->rd_tableam->tuple_satisfies_snapshot(rel, slot, snapshot); |
| } |
| |
| /* |
| * Determine which index tuples are safe to delete based on their table TID. |
| * |
| * Determines which entries from the index AM caller's TM_IndexDeleteOp state |
| * point to vacuumable table tuples. Entries that the tableam finds to be |
| * vacuumable are naturally safe for the index AM to delete, and so get directly |
| * marked as deletable. See comments above TM_IndexDelete and comments above |
| * TM_IndexDeleteOp for full details. |
| * |
| * Returns a snapshotConflictHorizon transaction ID that the caller places in |
| * its index deletion WAL record. This might be used during subsequent REDO |
| * of the WAL record when in Hot Standby mode -- a recovery conflict for the |
| * index deletion operation might be required on the standby. |
| */ |
| static inline TransactionId |
| table_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) |
| { |
| return rel->rd_tableam->index_delete_tuples(rel, delstate); |
| } |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Functions for manipulations of physical tuples. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Insert a tuple from a slot into a table, via the AM's tuple_insert routine. |
| * |
| * The options bitmask allows the caller to specify options that may change the |
| * behaviour of the AM. The AM will ignore options that it does not support. |
| * |
| * If the TABLE_INSERT_SKIP_FSM option is specified, AMs are free to not reuse |
| * free space in the relation. This can save some cycles when we know the |
| * relation is new and doesn't contain useful amounts of free space. |
| * TABLE_INSERT_SKIP_FSM is commonly passed directly to |
| * RelationGetBufferForTuple. See that method for more information. |
| * |
| * TABLE_INSERT_FROZEN should only be specified for inserts into |
| * relation storage created during the current subtransaction and when |
| * there are no prior snapshots or pre-existing portals open. |
| * This causes rows to be frozen, which is an MVCC violation and |
| * requires explicit options chosen by the user. |
| * |
| * TABLE_INSERT_NO_LOGICAL force-disables the emitting of logical decoding |
| * information for the tuple. This should solely be used during table rewrites |
| * where RelationIsLogicallyLogged(relation) is not yet accurate for the new |
| * relation. |
| * |
| * Note that most of these options will be applied when inserting into the |
| * heap's TOAST table, too, if the tuple requires any out-of-line data. |
| * |
| * The BulkInsertState object (if any; bistate can be NULL for default |
| * behavior) is also just passed through to RelationGetBufferForTuple. If |
| * `bistate` is provided, table_finish_bulk_insert() needs to be called. |
| * |
| * On return the slot's tts_tid and tts_tableOid are updated to reflect the |
| * insertion. But note that any toasting of fields within the slot is NOT |
| * reflected in the slot's contents. |
| */ |
| static inline void |
| table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, |
| int options, struct BulkInsertStateData *bistate) |
| { |
| rel->rd_tableam->tuple_insert(rel, slot, cid, options, |
| bistate); |
| } |
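| |
| /* |
| * Illustrative sketch (hypothetical): a plain single-tuple insert. Assumes |
| * `rel` is open and suitably locked, and that `slot` already contains the |
| * new tuple's values; options = 0 and bistate = NULL request the default |
| * behavior. |
| * |
| *     table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL); |
| */ |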
| |
| /* |
| * Perform a "speculative insertion". These can be backed out afterwards |
| * without aborting the whole transaction. Other sessions can wait for the |
| * speculative insertion to be confirmed, turning it into a regular tuple, or |
| * aborted, as if it never existed. Speculatively inserted tuples behave as |
| * "value locks" of short duration, used to implement INSERT .. ON CONFLICT. |
| * |
| * A transaction having performed a speculative insertion has to either abort, |
| * or finish the speculative insertion with |
| * table_tuple_complete_speculative(succeeded = ...). |
| */ |
| static inline void |
| table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot, |
| CommandId cid, int options, |
| struct BulkInsertStateData *bistate, |
| uint32 specToken) |
| { |
| rel->rd_tableam->tuple_insert_speculative(rel, slot, cid, options, |
| bistate, specToken); |
| } |
| |
| /* |
| * Complete "speculative insertion" started in the same transaction. If |
| * succeeded is true, the tuple is fully inserted, if false, it's removed. |
| */ |
| static inline void |
| table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot, |
| uint32 specToken, bool succeeded) |
| { |
| rel->rd_tableam->tuple_complete_speculative(rel, slot, specToken, |
| succeeded); |
| } |
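| |
| /* |
| * Illustrative sketch of the speculative-insertion protocol (hypothetical |
| * caller, loosely following what INSERT .. ON CONFLICT does; `rel`, `slot`, |
| * `cid` and `conflicted` are assumed, and the lock calls are from |
| * storage/lmgr.h): |
| * |
| *     uint32      specToken; |
| * |
| *     specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId()); |
| *     table_tuple_insert_speculative(rel, slot, cid, 0, NULL, specToken); |
| *     ... recheck for a conflicting tuple, e.g. via the unique index ... |
| *     table_tuple_complete_speculative(rel, slot, specToken, !conflicted); |
| *     SpeculativeInsertionLockRelease(GetCurrentTransactionId()); |
| */ |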
| |
| /* |
| * Insert multiple tuples into a table. |
| * |
| * This is like table_tuple_insert(), but inserts multiple tuples in one |
| * operation. That's often faster than calling table_tuple_insert() in a loop, |
| * because e.g. the AM can reduce WAL logging and page locking overhead. |
| * |
| * Except for taking `nslots` tuples as input, and an array of TupleTableSlots |
| * in `slots`, the parameters for table_multi_insert() are the same as for |
| * table_tuple_insert(). |
| * |
| * Note: this leaks memory into the current memory context. You can create a |
| * temporary context before calling this, if that's a problem. |
| */ |
| static inline void |
| table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots, |
| CommandId cid, int options, struct BulkInsertStateData *bistate) |
| { |
| rel->rd_tableam->multi_insert(rel, slots, nslots, |
| cid, options, bistate); |
| } |
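| |
| /* |
| * Illustrative sketch (hypothetical): bounding the memory leaked by |
| * table_multi_insert() with a short-lived context (from utils/memutils.h): |
| * |
| *     MemoryContext batchcxt = AllocSetContextCreate(CurrentMemoryContext, |
| *                                                    "multi-insert batch", |
| *                                                    ALLOCSET_DEFAULT_SIZES); |
| *     MemoryContext oldcxt = MemoryContextSwitchTo(batchcxt); |
| * |
| *     table_multi_insert(rel, slots, nslots, cid, 0, bistate); |
| * |
| *     MemoryContextSwitchTo(oldcxt); |
| *     MemoryContextReset(batchcxt); |
| */ |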
| |
| /* |
| * Delete a tuple. |
| * |
| * NB: do not call this directly unless you are prepared to deal with |
| * concurrent-update conditions. Use simple_table_tuple_delete instead. |
| * |
| * Input parameters: |
| * relation - table to be modified (caller must hold suitable lock) |
| * tid - TID of tuple to be deleted |
| * cid - delete command ID (used for visibility test, and stored into |
| * cmax if successful) |
| * crosscheck - if not InvalidSnapshot, also check tuple against this |
| * wait - true if should wait for any conflicting update to commit/abort |
| * Output parameters: |
| * tmfd - filled in failure cases (see below) |
| * changingPart - true iff the tuple is being moved to another partition |
| * due to an update of the partition key; otherwise false |
| * |
| * Normal, successful return value is TM_Ok, which means we did actually |
| * delete it. Failure return codes are TM_SelfModified, TM_Updated, and |
| * TM_BeingModified (the last only possible if wait == false). |
| * |
| * In the failure cases, the routine fills *tmfd with the tuple's t_ctid, |
| * t_xmax, and, if possible, t_cmax. See comments for struct |
| * TM_FailureData for additional info. |
| */ |
| static inline TM_Result |
| table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid, |
| Snapshot snapshot, Snapshot crosscheck, bool wait, |
| TM_FailureData *tmfd, bool changingPart) |
| { |
| return rel->rd_tableam->tuple_delete(rel, tid, cid, |
| snapshot, crosscheck, |
| wait, tmfd, changingPart); |
| } |
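| |
| /* |
| * Illustrative sketch (hypothetical): a caller prepared for the concurrency |
| * outcomes documented above, roughly in the shape of the executor's delete |
| * path. |
| * |
| *     TM_FailureData tmfd; |
| * |
| *     switch (table_tuple_delete(rel, tid, cid, snapshot, InvalidSnapshot, |
| *                                true, &tmfd, false)) |
| *     { |
| *         case TM_Ok: |
| *             break; |
| *         case TM_SelfModified: |
| *             ... already deleted by this command; typically a no-op ... |
| *         case TM_Updated: |
| *             ... concurrently updated; re-fetch the new version or error ... |
| *         default: |
| *             ... handle the remaining TM_Result codes ... |
| *     } |
| */ |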
| |
| /* |
| * Update a tuple. |
| * |
| * NB: do not call this directly unless you are prepared to deal with |
| * concurrent-update conditions. Use simple_table_tuple_update instead. |
| * |
| * Input parameters: |
| * relation - table to be modified (caller must hold suitable lock) |
| * otid - TID of old tuple to be replaced |
| * slot - newly constructed tuple data to store |
| * cid - update command ID (used for visibility test, and stored into |
| * cmax/cmin if successful) |
| * crosscheck - if not InvalidSnapshot, also check old tuple against this |
| * wait - true if should wait for any conflicting update to commit/abort |
| * Output parameters: |
| * tmfd - filled in failure cases (see below) |
| * lockmode - filled with lock mode acquired on tuple |
| * update_indexes - in success cases this is set to true if new index entries |
| * are required for this tuple |
| * |
| * Normal, successful return value is TM_Ok, which means we did actually |
| * update it. Failure return codes are TM_SelfModified, TM_Updated, and |
| * TM_BeingModified (the last only possible if wait == false). |
| * |
| * On success, the slot's tts_tid and tts_tableOid are updated to match the new |
| * stored tuple; in particular, slot->tts_tid is set to the TID where the |
| * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT |
| * update was done. However, any TOAST changes in the new tuple's |
| * data are not reflected in the slot's contents. |
| * |
| * In the failure cases, the routine fills *tmfd with the tuple's t_ctid, |
| * t_xmax, and, if possible, t_cmax. See comments for struct TM_FailureData |
| * for additional info. |
| */ |
| static inline TM_Result |
| table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, |
| CommandId cid, Snapshot snapshot, Snapshot crosscheck, |
| bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, |
| TU_UpdateIndexes *update_indexes) |
| { |
| return rel->rd_tableam->tuple_update(rel, otid, slot, |
| cid, snapshot, crosscheck, |
| wait, tmfd, |
| lockmode, update_indexes); |
| } |
| |
| /* |
| * Lock a tuple in the specified mode. |
| * |
| * Input parameters: |
| * relation: relation containing tuple (caller must hold suitable lock) |
| * tid: TID of tuple to lock |
| * snapshot: snapshot to use for visibility determinations |
| * cid: current command ID (used for visibility test, and stored into |
| * tuple's cmax if lock is successful) |
| * mode: lock mode desired |
| * wait_policy: what to do if tuple lock is not available |
| * flags: |
| * If TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, follow the update chain to |
| * also lock descendant tuples if lock modes don't conflict. |
| * If TUPLE_LOCK_FLAG_FIND_LAST_VERSION, follow the update chain and lock |
| * latest version. |
| * |
| * Output parameters: |
| * *slot: contains the target tuple |
| * *tmfd: filled in failure cases (see below) |
| * |
| * Function result may be: |
| * TM_Ok: lock was successfully acquired |
| * TM_Invisible: lock failed because tuple was never visible to us |
| * TM_SelfModified: lock failed because tuple updated by self |
| * TM_Updated: lock failed because tuple updated by other xact |
| * TM_Deleted: lock failed because tuple deleted by other xact |
| * TM_WouldBlock: lock couldn't be acquired and wait_policy is skip |
| * |
| * In the failure cases other than TM_Invisible and TM_Deleted, the routine |
| * fills *tmfd with the tuple's t_ctid, t_xmax, and, if possible, t_cmax. See |
| * comments for struct TM_FailureData for additional info. |
| */ |
| static inline TM_Result |
| table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot, |
| TupleTableSlot *slot, CommandId cid, LockTupleMode mode, |
| LockWaitPolicy wait_policy, uint8 flags, |
| TM_FailureData *tmfd) |
| { |
| return rel->rd_tableam->tuple_lock(rel, tid, snapshot, slot, |
| cid, mode, wait_policy, |
| flags, tmfd); |
| } |
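| |
| /* |
| * Illustrative sketch (hypothetical, loosely modeled on the executor's row |
| * locking): lock the latest version of a row, skipping it if another session |
| * holds a conflicting lock. |
| * |
| *     TM_FailureData tmfd; |
| *     TM_Result   res; |
| * |
| *     res = table_tuple_lock(rel, &tid, snapshot, slot, |
| *                            GetCurrentCommandId(false), |
| *                            LockTupleExclusive, LockWaitSkip, |
| *                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION, |
| *                            &tmfd); |
| *     if (res == TM_WouldBlock) |
| *         ... another xact holds a conflicting lock; skip this row ... |
| */ |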
| |
| /* |
| * Perform operations necessary to complete insertions made via |
| * tuple_insert and multi_insert with a BulkInsertState specified. |
| */ |
| static inline void |
| table_finish_bulk_insert(Relation rel, int options) |
| { |
| /* optional callback */ |
| if (rel->rd_tableam && rel->rd_tableam->finish_bulk_insert) |
| rel->rd_tableam->finish_bulk_insert(rel, options); |
| } |
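| |
| /* |
| * Illustrative sketch (hypothetical, similar in shape to COPY FROM): the |
| * full bulk-insert lifecycle around the calls above. GetBulkInsertState() |
| * and FreeBulkInsertState() are declared in access/heapam.h. |
| * |
| *     BulkInsertState bistate = GetBulkInsertState(); |
| * |
| *     for each incoming row: |
| *         table_tuple_insert(rel, slot, cid, 0, bistate); |
| * |
| *     table_finish_bulk_insert(rel, 0); |
| *     FreeBulkInsertState(bistate); |
| */ |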
| |
| |
| /* ------------------------------------------------------------------------ |
| * DDL related functionality. |
| * ------------------------------------------------------------------------ |
| */ |
| |
| /* |
| * Create storage for `rel` in `newrlocator`, with persistence set to |
| * `persistence`. |
| * |
| * This is used both during relation creation and various DDL operations to |
| * create new rel storage that can be filled from scratch. When creating |
| * new storage for an existing relfilelocator, this should be called before the |
| * relcache entry has been updated. |
| * |
| * *freezeXid, *minmulti are set to the xid / multixact horizon for the table |
| * that pg_class.{relfrozenxid, relminmxid} have to be set to. |
| */ |
| static inline void |
| table_relation_set_new_filelocator(Relation rel, |
| const RelFileLocator *newrlocator, |
| char persistence, |
| TransactionId *freezeXid, |
| MultiXactId *minmulti) |
| { |
| rel->rd_tableam->relation_set_new_filelocator(rel, newrlocator, |
| persistence, freezeXid, |
| minmulti); |
| } |
| |
| /* |
| * Remove all table contents from `rel`, in a non-transactional manner. |
| * "Non-transactional" means that there is no need to support rollbacks. This |
| * is commonly only used to perform truncations for relation storage created |
| * in the current transaction. |
| */ |
| static inline void |
| table_relation_nontransactional_truncate(Relation rel) |
| { |
| rel->rd_tableam->relation_nontransactional_truncate(rel); |
| } |
| |
| /* |
| * Copy data from `rel` into the new relfilelocator `newrlocator`. The new |
| * relfilelocator must not have storage associated with it before this function |
| * called. This is only supposed to be used for low level operations like |
| * changing a relation's tablespace. |
| */ |
| static inline void |
| table_relation_copy_data(Relation rel, const RelFileLocator *newrlocator) |
| { |
| rel->rd_tableam->relation_copy_data(rel, newrlocator); |
| } |
| |
| /* |
| * Copy data from `OldTable` into `NewTable`, as part of a CLUSTER or VACUUM |
| * FULL. |
| * |
| * Additional Input parameters: |
| * - use_sort - if true, the table contents are sorted appropriately for |
| * `OldIndex`; if false and OldIndex is not InvalidOid, the data is copied |
| * in that index's order; if false and OldIndex is InvalidOid, no sorting is |
| * performed |
| * - OldIndex - see use_sort |
| * - OldestXmin - computed by vacuum_get_cutoffs(), even when |
| * not needed for the relation's AM |
| * - *xid_cutoff - ditto |
| * - *multi_cutoff - ditto |
| * |
| * Output parameters: |
| * - *xid_cutoff - rel's new relfrozenxid value, may be invalid |
| * - *multi_cutoff - rel's new relminmxid value, may be invalid |
| * - *tups_vacuumed - stats, for logging, if appropriate for AM |
| * - *tups_recently_dead - stats, for logging, if appropriate for AM |
| */ |
| static inline void |
| table_relation_copy_for_cluster(Relation OldTable, Relation NewTable, |
| Relation OldIndex, |
| bool use_sort, |
| TransactionId OldestXmin, |
| TransactionId *xid_cutoff, |
| MultiXactId *multi_cutoff, |
| double *num_tuples, |
| double *tups_vacuumed, |
| double *tups_recently_dead) |
| { |
| OldTable->rd_tableam->relation_copy_for_cluster(OldTable, NewTable, OldIndex, |
| use_sort, OldestXmin, |
| xid_cutoff, multi_cutoff, |
| num_tuples, tups_vacuumed, |
| tups_recently_dead); |
| } |
| |
| /* |
| * Perform VACUUM on the relation. The VACUUM can be triggered by a user or by |
| * autovacuum. The specific actions performed will depend heavily on the |
| * individual AM. |
| * |
| * On entry a transaction needs to already have been established, and the |
| * table is locked with a ShareUpdateExclusive lock. |
| * |
| * Note that neither VACUUM FULL, nor CLUSTER, nor ANALYZE goes through this |
| * routine, even if (for ANALYZE) it is part of the same VACUUM command. |
| */ |
| static inline void |
| table_relation_vacuum(Relation rel, struct VacuumParams *params, |
| BufferAccessStrategy bstrategy) |
| { |
| rel->rd_tableam->relation_vacuum(rel, params, bstrategy); |
| } |
| |
| /* |
| * Prepare to analyze block `blockno` of `scan`. The scan needs to have been |
| * started with table_beginscan_analyze(). Note that this routine might |
| * acquire resources like locks that are held until |
| * table_scan_analyze_next_tuple() returns false. |
| * |
| * Returns false if block is unsuitable for sampling, true otherwise. |
| */ |
| static inline bool |
| table_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, |
| BufferAccessStrategy bstrategy) |
| { |
| return scan->rs_rd->rd_tableam->scan_analyze_next_block(scan, blockno, |
| bstrategy); |
| } |
| |
| /* |
| * Iterate over tuples in the block selected with |
| * table_scan_analyze_next_block() (which needs to have returned true, and |
| * this routine may not have returned false for the same block before). If a |
| * tuple that's suitable for sampling is found, true is returned and a tuple |
| * is stored in `slot`. |
| * |
| * *liverows and *deadrows are incremented according to the encountered |
| * tuples. |
| */ |
| static inline bool |
| table_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin, |
| double *liverows, double *deadrows, |
| TupleTableSlot *slot) |
| { |
| return scan->rs_rd->rd_tableam->scan_analyze_next_tuple(scan, OldestXmin, |
| liverows, deadrows, |
| slot); |
| } |
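| |
| /* |
| * Illustrative sketch (hypothetical, following the shape of ANALYZE's |
| * acquire_sample_rows()): the two-level loop these two callbacks are |
| * designed for. |
| * |
| *     while (... more target blocks to sample ...) |
| *     { |
| *         if (!table_scan_analyze_next_block(scan, blockno, bstrategy)) |
| *             continue; |
| * |
| *         while (table_scan_analyze_next_tuple(scan, OldestXmin, |
| *                                              &liverows, &deadrows, slot)) |
| *             ... consider the tuple in `slot` for the sample ... |
| *     } |
| */ |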
| |
| /* |
| * table_index_build_scan - scan the table to find tuples to be indexed |
| * |
| * This is called back from an access-method-specific index build procedure |
| * after the AM has done whatever setup it needs. The parent table relation |
| * is scanned to find tuples that should be entered into the index. Each |
| * such tuple is passed to the AM's callback routine, which does the right |
| * things to add it to the new index. After we return, the AM's index |
| * build procedure does whatever cleanup it needs. |
| * |
| * The total count of live tuples is returned. This is for updating pg_class |
| * statistics. (It's annoying not to be able to do that here, but we want to |
| * merge that update with others; see index_update_stats.) Note that the |
| * index AM itself must keep track of the number of index tuples; we don't do |
| * so here because the AM might reject some of the tuples for its own reasons, |
| * such as being unable to store NULLs. |
| * |
| * If 'progress' is true, the PROGRESS_SCAN_BLOCKS_TOTAL counter is updated when |
| * starting the scan, and PROGRESS_SCAN_BLOCKS_DONE is updated as we go along. |
| * |
| * A side effect is to set indexInfo->ii_BrokenHotChain to true if we detect |
| * any potentially broken HOT chains. Currently, we set this if there are any |
| * RECENTLY_DEAD or DELETE_IN_PROGRESS entries in a HOT chain, without trying |
| * very hard to detect whether they're really incompatible with the chain tip. |
| * This only really makes sense for the heap AM; it might need to be |
| * generalized for other AMs later. |
| */ |
| static inline double |
| table_index_build_scan(Relation table_rel, |
| Relation index_rel, |
| struct IndexInfo *index_info, |
| bool allow_sync, |
| bool progress, |
| IndexBuildCallback callback, |
| void *callback_state, |
| TableScanDesc scan) |
| { |
| return table_rel->rd_tableam->index_build_range_scan(table_rel, |
| index_rel, |
| index_info, |
| allow_sync, |
| false, |
| progress, |
| 0, |
| InvalidBlockNumber, |
| callback, |
| callback_state, |
| scan); |
| } |
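| |
| /* |
| * Illustrative sketch (hypothetical): the shape of an IndexBuildCallback as |
| * an index AM would pass it to table_index_build_scan(); `state` points to |
| * AM-private build state. |
| * |
| *     static void |
| *     my_build_callback(Relation index, ItemPointer tid, Datum *values, |
| *                       bool *isnull, bool tupleIsAlive, void *state) |
| *     { |
| *         ... form an index tuple from values/isnull, pointing at tid, |
| *             and add it to the new index ... |
| *     } |
| */ |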
| |
| /* |
| * As table_index_build_scan(), except that instead of scanning the complete |
| * table, only the given number of blocks are scanned. Scan to end-of-rel can |
| * be signaled by passing InvalidBlockNumber as numblocks. Note that |
| * restricting the range to scan cannot be done when requesting syncscan. |
| * |
| * When "anyvisible" mode is requested, all tuples visible to any transaction |
| * are indexed and counted as live, including those inserted or deleted by |
| * transactions that are still in progress. |
| */ |
| static inline double |
| table_index_build_range_scan(Relation table_rel, |
| Relation index_rel, |
| struct IndexInfo *index_info, |
| bool allow_sync, |
| bool anyvisible, |
| bool progress, |
| BlockNumber start_blockno, |
| BlockNumber numblocks, |
| IndexBuildCallback callback, |
| void *callback_state, |
| TableScanDesc scan) |
| { |
| return table_rel->rd_tableam->index_build_range_scan(table_rel, |
| index_rel, |
| index_info, |
| allow_sync, |
| anyvisible, |
| progress, |
| start_blockno, |
| numblocks, |
| callback, |
| callback_state, |
| scan); |
| } |
| |
| /* |
| * table_index_validate_scan - second table scan for concurrent index build |
| * |
| * See validate_index() for an explanation. |
| */ |
| static inline void |
| table_index_validate_scan(Relation table_rel, |
| Relation index_rel, |
| struct IndexInfo *index_info, |
| Snapshot snapshot, |
| struct ValidateIndexState *state) |
| { |
| table_rel->rd_tableam->index_validate_scan(table_rel, |
| index_rel, |
| index_info, |
| snapshot, |
| state); |
| } |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Miscellaneous functionality |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Return the current size of `rel` in bytes. If `forkNumber` is |
| * InvalidForkNumber, return the relation's overall size, otherwise the size |
| * for the indicated fork. |
| * |
| * Note that the overall size might not equal the sum of the sizes of the |
| * individual forks for some AMs, e.g. because the AM's storage does not |
| * neatly map onto the builtin types of forks. |
| */ |
| static inline uint64 |
| table_relation_size(Relation rel, ForkNumber forkNumber) |
| { |
| return rel->rd_tableam->relation_size(rel, forkNumber); |
| } |
| |
| /* |
| * GPDB: Returns the block sequences contained in this relation. See |
| * BlockSequence for details. Currently used by BRIN. |
| * |
| * Out of all theoretically possible block sequences, we only return the ones |
| * that currently exist for the relation. |
| */ |
| static inline BlockSequence * |
| table_relation_get_block_sequences(Relation rel, int *numSequences) |
| { |
| return rel->rd_tableam->relation_get_block_sequences(rel, numSequences); |
| } |
| |
| /* |
| * GPDB: Determines the block sequence in which the logical heap block |
| * 'blkNumber' falls. See BlockSequence for details. Currently used by BRIN. |
| * |
| * If the specified logical 'blkNumber' doesn't fall into the range of an |
| * existing BlockSequence, the BlockSequence is expected to contain the correct |
| * startblknum with nblocks = 0. |
| */ |
| static inline void |
| table_relation_get_block_sequence(Relation rel, |
| BlockNumber blkNumber, |
| BlockSequence *sequence) |
| { |
| rel->rd_tableam->relation_get_block_sequence(rel, blkNumber, sequence); |
| } |
| |
| /* |
| * table_relation_needs_toast_table - does this relation need a toast table? |
| */ |
| static inline bool |
| table_relation_needs_toast_table(Relation rel) |
| { |
| return rel->rd_tableam->relation_needs_toast_table(rel); |
| } |
| |
| /* |
| * Return the OID of the AM that should be used to implement the TOAST table |
| * for this relation. |
| */ |
| static inline Oid |
| table_relation_toast_am(Relation rel) |
| { |
| return rel->rd_tableam->relation_toast_am(rel); |
| } |
| |
| /* |
| * Fetch all or part of a TOAST value from a TOAST table. |
| * |
| * If this AM is never used to implement a TOAST table, then this callback |
| * is not needed. But, if toasted values are ever stored in a table of this |
| * type, then you will need this callback. |
| * |
| * toastrel is the relation in which the toasted value is stored. |
| * |
| * valueid identifies which toast value is to be fetched. For the heap, |
| * this corresponds to the values stored in the chunk_id column. |
| * |
| * attrsize is the total size of the toast value to be fetched. |
| * |
| * sliceoffset is the offset within the toast value of the first byte that |
| * should be fetched. |
| * |
| * slicelength is the number of bytes from the toast value that should be |
| * fetched. |
| * |
| * result is caller-allocated space into which the fetched bytes should be |
| * stored. |
| */ |
| static inline void |
| table_relation_fetch_toast_slice(Relation toastrel, Oid valueid, |
| int32 attrsize, int32 sliceoffset, |
| int32 slicelength, struct varlena *result) |
| { |
| toastrel->rd_tableam->relation_fetch_toast_slice(toastrel, valueid, |
| attrsize, |
| sliceoffset, slicelength, |
| result); |
| } |
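| |
| /* |
| * Illustrative sketch (hypothetical): fetching the first 100 bytes of a |
| * toast value into a caller-allocated varlena, along the lines of |
| * toast_fetch_datum_slice(): |
| * |
| *     int32       slicelength = 100; |
| *     struct varlena *result = palloc(slicelength + VARHDRSZ); |
| * |
| *     SET_VARSIZE(result, slicelength + VARHDRSZ); |
| *     table_relation_fetch_toast_slice(toastrel, valueid, attrsize, |
| *                                      0, slicelength, result); |
| */ |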
| |
| /* ---------------------------------------------------------------------------- |
| * Planner related functionality |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Estimate the current size of the relation, as an AM-specific workhorse for |
| * estimate_rel_size(). Look there for an explanation of the parameters. |
| */ |
| static inline void |
| table_relation_estimate_size(Relation rel, int32 *attr_widths, |
| BlockNumber *pages, double *tuples, |
| double *allvisfrac) |
| { |
| rel->rd_tableam->relation_estimate_size(rel, attr_widths, pages, tuples, |
| allvisfrac); |
| } |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Executor related functionality |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| /* |
| * Prepare to fetch / check / return tuples from `tbmres->blockno` as part of |
| * a bitmap table scan. `scan` needs to have been started via |
| * table_beginscan_bm(). Returns false if there are no tuples to be found on |
| * the page, true otherwise. |
| * |
| * Note that this is an optional callback; callers should only use it after |
| * verifying its presence (at plan time or similar). |
| */ |
| static inline bool |
| table_scan_bitmap_next_block(TableScanDesc scan, |
| struct TBMIterateResult *tbmres) |
| { |
| /* |
| * We don't expect direct calls to table_scan_bitmap_next_block with valid |
| * CheckXidAlive for catalog or regular tables. See detailed comments in |
| * xact.c where these variables are declared. |
| */ |
| if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) |
| elog(ERROR, "unexpected table_scan_bitmap_next_block call during logical decoding"); |
| |
| return scan->rs_rd->rd_tableam->scan_bitmap_next_block(scan, |
| tbmres); |
| } |
| |
| /* |
| * Fetch the next tuple of a bitmap table scan into `slot` and return true if |
| * a visible tuple was found, false otherwise. |
| * table_scan_bitmap_next_block() needs to previously have selected a |
| * block (i.e. returned true), and no previous |
| * table_scan_bitmap_next_tuple() for the same block may have |
| * returned false. |
| */ |
| static inline bool |
| table_scan_bitmap_next_tuple(TableScanDesc scan, |
| struct TBMIterateResult *tbmres, |
| TupleTableSlot *slot) |
| { |
| slot->tts_tableOid = RelationGetRelid(scan->rs_rd); |
| /* |
| * We don't expect direct calls to table_scan_bitmap_next_tuple with valid |
| * CheckXidAlive for catalog or regular tables. See detailed comments in |
| * xact.c where these variables are declared. |
| */ |
| if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) |
| elog(ERROR, "unexpected table_scan_bitmap_next_tuple call during logical decoding"); |
| |
| return scan->rs_rd->rd_tableam->scan_bitmap_next_tuple(scan, |
| tbmres, |
| slot); |
| } |
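| |
| /* |
| * Illustrative sketch (hypothetical, mirroring the bitmap heap scan node): |
| * the nested loop the two bitmap-scan callbacks are meant for; `iterator` |
| * is a TBMIterator over the bitmap (tidbitmap.h). |
| * |
| *     while ((tbmres = tbm_iterate(iterator)) != NULL) |
| *     { |
| *         if (!table_scan_bitmap_next_block(scan, tbmres)) |
| *             continue; |
| * |
| *         while (table_scan_bitmap_next_tuple(scan, tbmres, slot)) |
| *             ... recheck quals if tbmres->recheck, then emit `slot` ... |
| *     } |
| */ |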
| |
| /* |
| * Prepare to fetch tuples from the next block in a sample scan. Returns false |
| * if the sample scan is finished, true otherwise. `scan` needs to have been |
| * started via table_beginscan_sampling(). |
| * |
| * This will call the TsmRoutine's NextSampleBlock() callback if necessary |
| * (i.e. NextSampleBlock is not NULL), or perform a sequential scan over the |
| * underlying relation. |
| */ |
| static inline bool |
| table_scan_sample_next_block(TableScanDesc scan, |
| struct SampleScanState *scanstate) |
| { |
| /* |
| * We don't expect direct calls to table_scan_sample_next_block with valid |
| * CheckXidAlive for catalog or regular tables. See detailed comments in |
| * xact.c where these variables are declared. |
| */ |
| if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) |
| elog(ERROR, "unexpected table_scan_sample_next_block call during logical decoding"); |
| return scan->rs_rd->rd_tableam->scan_sample_next_block(scan, scanstate); |
| } |
| |
| /* |
| * Fetch the next sample tuple into `slot` and return true if a visible tuple |
| * was found, false otherwise. table_scan_sample_next_block() needs to |
| * previously have selected a block (i.e. returned true), and no previous |
| * table_scan_sample_next_tuple() for the same block may have returned false. |
| * |
| * This will call the TsmRoutine's NextSampleTuple() callback. |
| */ |
| static inline bool |
| table_scan_sample_next_tuple(TableScanDesc scan, |
| struct SampleScanState *scanstate, |
| TupleTableSlot *slot) |
| { |
| /* |
| * We don't expect direct calls to table_scan_sample_next_tuple with valid |
| * CheckXidAlive for catalog or regular tables. See detailed comments in |
| * xact.c where these variables are declared. |
| */ |
| if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) |
| elog(ERROR, "unexpected table_scan_sample_next_tuple call during logical decoding"); |
| return scan->rs_rd->rd_tableam->scan_sample_next_tuple(scan, scanstate, |
| slot); |
| } |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Functions to make modifications a bit simpler. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| extern void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot); |
| extern void simple_table_tuple_delete(Relation rel, ItemPointer tid, |
| Snapshot snapshot); |
| extern void simple_table_tuple_update(Relation rel, ItemPointer otid, |
| TupleTableSlot *slot, Snapshot snapshot, |
| TU_UpdateIndexes *update_indexes); |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Helper functions to implement parallel scans for block oriented AMs. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| extern Size table_block_parallelscan_estimate(Relation rel); |
| extern Size table_block_parallelscan_initialize(Relation rel, |
| ParallelTableScanDesc pscan); |
| extern void table_block_parallelscan_reinitialize(Relation rel, |
| ParallelTableScanDesc pscan); |
| extern BlockNumber table_block_parallelscan_nextpage(Relation rel, |
| ParallelBlockTableScanWorker pbscanwork, |
| ParallelBlockTableScanDesc pbscan); |
| extern void table_block_parallelscan_startblock_init(Relation rel, |
| ParallelBlockTableScanWorker pbscanwork, |
| ParallelBlockTableScanDesc pbscan); |
| |
| |
| /* ---------------------------------------------------------------------------- |
| * Helper functions to implement relation sizing for block oriented AMs. |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| extern uint64 table_block_relation_size(Relation rel, ForkNumber forkNumber); |
| extern void table_block_relation_estimate_size(Relation rel, |
| int32 *attr_widths, |
| BlockNumber *pages, |
| double *tuples, |
| double *allvisfrac, |
| Size overhead_bytes_per_tuple, |
| Size usable_bytes_per_page); |
| |
| /* ---------------------------------------------------------------------------- |
| * Functions in tableamapi.c |
| * ---------------------------------------------------------------------------- |
| */ |
| |
| extern const TableAmRoutine *GetTableAmRoutine(Oid amhandler); |
| extern const TableAmRoutine *GetTableAmRoutineByAmId(Oid amoid); |
| extern const TableAmRoutine *GetHeapamTableAmRoutine(void); |
| |
| /* ---------------------------------------------------------------------------- |
| * Hook function to run init/fini for storage extensions |
| * ---------------------------------------------------------------------------- |
| */ |
| enum CmdType; |
| typedef void (*ext_dml_func_hook_type) (Relation relation, enum CmdType operation); |
| extern PGDLLIMPORT ext_dml_func_hook_type ext_dml_init_hook; |
| extern PGDLLIMPORT ext_dml_func_hook_type ext_dml_finish_hook; |
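| |
| /* |
| * Illustrative sketch (hypothetical extension code): installing the DML init |
| * hook from an extension's _PG_init(), chaining to any previously installed |
| * hook. CmdType is the typedef from nodes/nodes.h. |
| * |
| *     static ext_dml_func_hook_type prev_init_hook = NULL; |
| * |
| *     static void |
| *     my_dml_init(Relation relation, CmdType operation) |
| *     { |
| *         if (prev_init_hook) |
| *             prev_init_hook(relation, operation); |
| *         ... extension-specific setup ... |
| *     } |
| * |
| *     void |
| *     _PG_init(void) |
| *     { |
| *         prev_init_hook = ext_dml_init_hook; |
| *         ext_dml_init_hook = my_dml_init; |
| *     } |
| */ |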
| |
| #endif /* TABLEAM_H */ |